From 064e050027239d57d17d81d1ffd76445989931a8 Mon Sep 17 00:00:00 2001 From: "retrox.jcy" Date: Fri, 24 May 2024 11:45:51 +0800 Subject: [PATCH 01/13] feat: terminal intell new --- ...inal-intell-command-controller.module.less | 38 + .../terminal-intell-command-controller.tsx | 119 ++ .../src/browser/contribution/index.ts | 1 + .../browser/contribution/terminal.intell.ts | 18 + packages/terminal-next/src/browser/index.ts | 2 + .../src/browser/intell/README.md | 23 + .../intell/intell-terminal.service.tsx | 272 ++++ .../src/browser/intell/runtime/bundle.js | 1179 +++++++++++++++++ .../src/browser/intell/runtime/generator.ts | 73 + .../src/browser/intell/runtime/model.ts | 17 + .../src/browser/intell/runtime/parser.ts | 75 ++ .../src/browser/intell/runtime/runtime.ts | 345 +++++ .../src/browser/intell/runtime/suggestion.ts | 293 ++++ .../src/browser/intell/runtime/template.ts | 127 ++ .../src/browser/intell/runtime/utils.ts | 65 + .../src/browser/intell/utils/ansi.ts | 36 + .../src/browser/intell/utils/log.ts | 19 + .../src/node/stupid-shell-intergration.ts | 53 + 18 files changed, 2755 insertions(+) create mode 100644 packages/terminal-next/src/browser/component/terminal-intell-command-controller.module.less create mode 100644 packages/terminal-next/src/browser/component/terminal-intell-command-controller.tsx create mode 100644 packages/terminal-next/src/browser/contribution/terminal.intell.ts create mode 100644 packages/terminal-next/src/browser/intell/README.md create mode 100644 packages/terminal-next/src/browser/intell/intell-terminal.service.tsx create mode 100644 packages/terminal-next/src/browser/intell/runtime/bundle.js create mode 100644 packages/terminal-next/src/browser/intell/runtime/generator.ts create mode 100644 packages/terminal-next/src/browser/intell/runtime/model.ts create mode 100644 packages/terminal-next/src/browser/intell/runtime/parser.ts create mode 100644 packages/terminal-next/src/browser/intell/runtime/runtime.ts create mode 100644 packages/terminal-next/src/browser/intell/runtime/suggestion.ts create mode 100644 packages/terminal-next/src/browser/intell/runtime/template.ts create mode 100644 packages/terminal-next/src/browser/intell/runtime/utils.ts create mode 100644 packages/terminal-next/src/browser/intell/utils/ansi.ts create mode 100644 packages/terminal-next/src/browser/intell/utils/log.ts create mode 100644 packages/terminal-next/src/node/stupid-shell-intergration.ts diff --git a/packages/terminal-next/src/browser/component/terminal-intell-command-controller.module.less b/packages/terminal-next/src/browser/component/terminal-intell-command-controller.module.less new file mode 100644 index 0000000000..0e7dd0435b --- /dev/null +++ b/packages/terminal-next/src/browser/component/terminal-intell-command-controller.module.less @@ -0,0 +1,38 @@ +.suggestions { + display: flex; + flex-direction: column; + background-color: var(--editorGroupHeader-tabsBackground); + color: var(--ai-native-text-color-common); + max-height: 350px; + width: 500px; + overflow-y: auto; + position: absolute; + bottom: 100%; + left: 0; + border-top-left-radius: 8px; + border-top-right-radius: 8px; +} + +.suggestionItem { + padding: 6px 10px 6px 16px; + cursor: pointer; +} + +.suggestionItemContainer { + display: flex; + flex-direction: column; +} + +.suggestionDesc { + font-size: 12px; +} + +.suggestionCmd { + font-size: 12px; + opacity: 0.6; +} + +.suggestionItem:hover { + filter: brightness(110%); + background-color: var(--selection-background); +} \ No newline at end of file diff 
--git a/packages/terminal-next/src/browser/component/terminal-intell-command-controller.tsx b/packages/terminal-next/src/browser/component/terminal-intell-command-controller.tsx
new file mode 100644
index 0000000000..a66d4801b8
--- /dev/null
+++ b/packages/terminal-next/src/browser/component/terminal-intell-command-controller.tsx
@@ -0,0 +1,119 @@
+import React, { useEffect, useState } from 'react';
+
+import { Emitter } from '@opensumi/ide-core-common';
+
+import styles from './terminal-intell-command-controller.module.less';
+
+export interface SmartCommandDesc {
+  description: string;
+  command: string;
+}
+
+// List component with keyboard-driven selection
+export const KeyboardSelectableList = (props: {
+  items: { description: string; command: string }[];
+  handleSuggestionClick: (command: string) => void;
+  controller?: Emitter<string>;
+  noListen?: boolean;
+}) => {
+  const { items, handleSuggestionClick, noListen = false, controller } = props;
+  // Index of the selected item; -1 means nothing is selected yet
+  const [selectedIndex, setSelectedIndex] = useState(-1);
+
+  // Handle keyboard events
+  const handleKeyPress = (event: KeyboardEvent) => {
+    switch (event.key) {
+      case 'ArrowUp': // up arrow
+        setSelectedIndex((prevIndex) => Math.max(prevIndex - 1, 0));
+        break;
+      case 'ArrowDown': // down arrow
+        setSelectedIndex((prevIndex) =>
+          Math.min(prevIndex + 1, items.length - 1),
+        );
+        break;
+      case 'Enter': // enter
+        if (items[selectedIndex]) {
+          handleSuggestionClick(items[selectedIndex].command);
+        }
+        break;
+      default:
+        break;
+    }
+  };
+
+  // Register a global keyboard listener
+  useEffect(() => {
+    if (noListen) { return; }
+    window.addEventListener('keydown', handleKeyPress);
+    return () => {
+      window.removeEventListener('keydown', handleKeyPress);
+    };
+  }, [items, selectedIndex]);
+
+  useEffect(() => {
+    if (!controller) { return; }
+    const disposable = controller.event((e: string) => {
+      if (e === 'ArrowUp') {
+        setSelectedIndex((prevIndex) => Math.max(prevIndex - 1, 0));
+      }
+      if (e === 'ArrowDown' || e === 'Tab') {
+        setSelectedIndex((prevIndex) =>
+          Math.min(prevIndex + 1, items.length - 1),
+        );
+      }
+      if (e === 'Enter') {
+        if (items[selectedIndex]) {
+          handleSuggestionClick(items[selectedIndex].command);
+        }
+      }
+    });
+
+    return () => {
+      disposable.dispose();
+    };
+  }, [controller, selectedIndex, items]);
+
+  useEffect(() => {
+    // HACK: reset the selection to the top whenever the items change
+    setSelectedIndex(0);
+  }, [items]);
+
+  return (
+    <div className={styles.suggestions}>
+      {items.map((cmd, index) => (
+        <div
+          key={index}
+          className={styles.suggestionItem}
+          style={{ backgroundColor: index === selectedIndex ? 'var(--selection-background)' : '' }}
+          onClick={() => handleSuggestionClick(cmd.command)}
+        >
+          <div className={styles.suggestionItemContainer}>
+            <div className={styles.suggestionDesc}>{cmd.description}</div>
+            <div className={styles.suggestionCmd}>{cmd.command}</div>
+          </div>
+        </div>
+      ))}
+    </div>
+  );
+};
+
+export const TerminalIntellCommandController = (props: {
+  suggestions: SmartCommandDesc[];
+  controller: Emitter<string>;
+  onSuggestion: (suggestion: string) => void;
+}) => {
+  const { suggestions, controller, onSuggestion } = props;
+
+  return (
+    <div style={{ position: 'relative' }}>
+      {suggestions.length > 0 && (
+        <KeyboardSelectableList
+          items={suggestions}
+          controller={controller}
+          handleSuggestionClick={onSuggestion}
+        />
+      )}
+    </div>
+  );
+};
diff --git a/packages/terminal-next/src/browser/contribution/index.ts b/packages/terminal-next/src/browser/contribution/index.ts
index 50a09868a2..bb00a7b451 100644
--- a/packages/terminal-next/src/browser/contribution/index.ts
+++ b/packages/terminal-next/src/browser/contribution/index.ts
@@ -5,3 +5,4 @@ export * from './terminal.view';
 export * from './terminal.keybinding';
 export * from './terminal.network';
 export * from './terminal.preference';
+export * from './terminal.intell';
diff --git a/packages/terminal-next/src/browser/contribution/terminal.intell.ts b/packages/terminal-next/src/browser/contribution/terminal.intell.ts
new file mode 100644
index 0000000000..8dac80719f
--- /dev/null
+++ b/packages/terminal-next/src/browser/contribution/terminal.intell.ts
@@ -0,0 +1,18 @@
+import { Autowired } from '@opensumi/di';
+import { ClientAppContribution, Domain } from '@opensumi/ide-core-browser';
+import {
+  MaybePromise,
+} from '@opensumi/ide-core-common';
+
+import { IntellTerminalService } from '../intell/intell-terminal.service';
+
+@Domain(ClientAppContribution)
+export class TerminalIntellContribution implements ClientAppContribution {
+
+  @Autowired(IntellTerminalService)
+  intellTerminalService: IntellTerminalService;
+
+  onDidStart(): MaybePromise<void> {
+    this.intellTerminalService.active();
+  }
+}
diff --git a/packages/terminal-next/src/browser/index.ts b/packages/terminal-next/src/browser/index.ts
index d77d015636..344edb706d 100644
--- a/packages/terminal-next/src/browser/index.ts
+++ b/packages/terminal-next/src/browser/index.ts
@@ -25,6 +25,7 @@ import { ITerminalPreference } from '../common/preference';
 
 import {
   TerminalCommandContribution,
+  TerminalIntellContribution,
   TerminalKeybindingContribution,
   TerminalLifeCycleContribution,
   TerminalMenuContribution,
@@ -60,6 +61,7 @@ export class TerminalNextModule extends BrowserModule {
     TerminalKeybindingContribution,
     TerminalNetworkContribution,
     TerminalPreferenceContribution,
+    TerminalIntellContribution,
     {
       token: ITerminalApiService,
       useClass: TerminalApiService,
diff --git a/packages/terminal-next/src/browser/intell/README.md b/packages/terminal-next/src/browser/intell/README.md
new file mode 100644
index 0000000000..cc586f64e9
--- /dev/null
+++ b/packages/terminal-next/src/browser/intell/README.md
@@ -0,0 +1,23 @@
+## Terminal Intelligent Completion
+
+Terminal intelligent completion means that, as the user types a command, the terminal automatically suggests likely commands and arguments, with an interaction model similar to a language service while coding.
+
+This makes the terminal noticeably easier to use.
+
+## Feature Status
+This feature is still an early Alpha experiment. Items that need discussion or follow-up:
+
+- [ ] Completion UI polish: the current design mainly serves feature validation, so the UI looks rough and needs iteration
+- [ ] Completion interaction polish: e.g. Up/Down to select with Tab to confirm, or Tab/Up/Down to select with Enter to confirm
+- [ ] Generator completions: suggestions that require invoking commands are not supported yet, since everything runs in the frontend; this likely needs frontend/backend communication
+- [ ] Rendering improvements: suggestions are currently rendered straight into xterm.js decorations; consider a global DOM node positioned via DOM Align, with lifecycle and position bound to an xterm.js decoration
+- [ ] Discuss whether the completion logic should move to Node.js
+- [ ] Move the Fig-based bundling logic into OpenSumi
+- [ ] Code style: the code taken from the inShellisense project is deliberately left untouched to make future syncs easier, though it may still be worth reformatting
+
+## Open-Source Dependencies
+Thanks to the open-source projects that provided the inspiration and supporting capabilities:
+
+https://github.com/withfig/autocomplete
+
+https://github.com/microsoft/inshellisense
\ No newline at end of file
diff --git a/packages/terminal-next/src/browser/intell/intell-terminal.service.tsx b/packages/terminal-next/src/browser/intell/intell-terminal.service.tsx
new file mode 100644
index 0000000000..2e32c70fe3
--- /dev/null
+++ b/packages/terminal-next/src/browser/intell/intell-terminal.service.tsx
@@ -0,0 +1,272 @@
+import React from 'react';
+import ReactDOM from 'react-dom';
+import { IDecoration, IDisposable, IMarker, Terminal } from 'xterm';
+
+import { Autowired, Injectable } from '@opensumi/di';
+import { Disposable, Emitter, FileType, Uri } from '@opensumi/ide-core-common';
+import { DiskFileServicePath, IDiskFileProvider } from '@opensumi/ide-file-service';
+import {
+  ITerminalConnection,
+  ITerminalController,
+} from '@opensumi/ide-terminal-next';
+
+import { TerminalIntellCommandController } from '../component/terminal-intell-command-controller';
+
+import { getSuggestions } from './runtime/runtime';
+import { fsAsyncStub } from './runtime/template';
+// @ts-ignore
+// import Fig from '@withfig/autocomplete-types';
+
+// Terminal AI capabilities integrated via a PS1 hack
+
+enum IstermOscPt {
+  PromptStarted = 'PS',
+  PromptEnded = 'PE',
+  CurrentWorkingDirectory = 'CWD'
+}
+
+@Injectable()
+export class IntellTerminalService extends Disposable {
+  @Autowired(ITerminalController)
+  private terminalController: ITerminalController;
+
+  @Autowired(DiskFileServicePath)
+  private diskFileProvider: IDiskFileProvider;
+
+  private controlEmitter = new Emitter<string>();
+
+  private popupContainer: HTMLDivElement; // container element for the AI completion popup
+
+  private promptStartMarker: IMarker | undefined;
+  private promptStartDecoration: IDecoration | undefined;
+  private promptEndMarker: IMarker | undefined;
+  private promptEndDecoration: IDecoration | undefined;
+  private onDataDisposable: IDisposable;
+
+  private lastPromptLineString: string;
+  private isShellIntellActive: boolean;
+
+  private currentSessionId: string;
+
+  public active() {
+    this.disposables.push(
+      this.terminalController.onDidOpenTerminal(({ id }) =>
+        this.listenTerminalEvent(id),
+      ),
+    );
+  }
+
+  private listenTerminalEvent(clientId: string) {
+    const client = this.terminalController.clients.get(clientId);
+
+    if (client) {
+      try {
+        this.listenPromptState(client.term);
+      } catch (e) {
+        console.error('listenTerminalEvent', e);
+      }
+    }
+  }
+
+  private writeNewLines(terminal: Terminal, numberOfLines: number) {
+    let newLines = '';
+    for (let i = 0; i < numberOfLines; i++) {
+      newLines += '\n';
+    }
+    terminal.write(newLines);
+  }
+
+  private listenPromptState(xterm: Terminal) {
+    // HACK: is injecting this many blank lines reasonable?
+    this.writeNewLines(xterm, xterm.rows);
+
+    xterm.parser.registerOscHandler(6973, (data) => {
+      const argsIndex = data.indexOf(';');
+      const sequence = argsIndex === -1 ? data : data.substring(0, argsIndex);
+
+      switch (sequence) {
+        case IstermOscPt.PromptStarted:
+          // this.handlePromptStart(xterm);
+          break;
+        case IstermOscPt.PromptEnded:
+          this.handlePromptEnd(xterm);
+          break;
+        default:
+          return false;
+      }
+      return false;
+    });
+  }
+
+  private handlePromptEnd(xterm: Terminal) {
+    if (this.promptEndMarker) {
+      this.promptEndMarker.dispose();
+    }
+    if (this.promptEndDecoration) {
+      this.promptEndDecoration.dispose();
+    }
+    this.promptEndMarker = xterm.registerMarker(0);
+    const xOffset2 = xterm.buffer.active.cursorX;
+
+    if (this.onDataDisposable) {
+      this.onDataDisposable.dispose();
+    }
+
+    window.getSuggestions = getSuggestions;
+
+    // HACK: reaching into the TerminalConnection like this is hacky; is there a better way?
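+    // Locate the attach addon through xterm's private addon manager; its instance owns the pty connection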
+    // @ts-ignore
+    const attachAddon = xterm._addonManager._addons.find((addon) => !!addon?.instance?.connection);
+
+    // Hack around the AttachAddon
+    const connection = attachAddon?.instance?.connection as ITerminalConnection;
+
+    // HACK: abuse readonly so the attach addon stops sending data to the backend; a proper onData interception still needs to be built
+    connection.readonly = true;
+
+    let notReRender = false;
+
+    this.onDataDisposable = xterm.onData(async (e) => {
+      console.time('Term onData');
+      switch (e) {
+        case '\x1b':
+          console.log('ESC pressed');
+          this.controlEmitter.fire('Escape');
+          break;
+        case '\x1b[A':
+          console.log('ArrowUp pressed');
+          this.controlEmitter.fire('ArrowUp');
+          notReRender = true;
+          break;
+        case '\x1b[B':
+          console.log('ArrowDown pressed');
+          this.controlEmitter.fire('ArrowDown');
+          notReRender = true;
+          break;
+        case '\t':
+        case '\x09': // same as '\t'
+          console.log('Tab pressed');
+          this.controlEmitter.fire('Tab');
+          notReRender = this.isShellIntellActive;
+          break;
+        case '\r':
+        case '\x0D':
+          if (this.isShellIntellActive) {
+            // While the suggestion popup is active, do not send Enter to the pty; forward it to the popup instead
+            this.controlEmitter.fire('Enter');
+          } else {
+            connection.sendData(e);
+          }
+          console.log('Enter pressed');
+
+          break;
+        default:
+          // console.log('other input:', e);
+          connection.sendData(e);
+          attachAddon?.instance?._onInput?.fire(e);
+      }
+
+      console.log('e', JSON.stringify(e));
+      if (e === '\x1b' && this.promptEndDecoration) {
+        console.log('promptEndDecoration dispose');
+        console.timeEnd('Term onData');
+        this.promptEndDecoration.dispose();
+        return;
+      }
+      // Get the currently active buffer
+      const buffer = xterm.buffer.active;
+
+      // Get the cursor position
+      const cursorX = buffer.cursorX;
+      const cursorY = buffer.cursorY;
+
+      const lineData = buffer.getLine(this.promptEndMarker?.line || 0);
+      const lineDataString = lineData?.translateToString(
+        false,
+        xOffset2,
+        cursorX,
+      );
+      console.log('lineDataString', lineDataString);
+
+      // Avoid re-rendering triggered by the Up/Down arrow keys
+      // if (JSON.stringify(lineDataString) === this.lastPromptLineString) {
+      //   console.log('Terminal buffer unchanged; no re-render needed')
+      //   return;
+      // }
+
+      if (notReRender) {
+        return;
+      }
+
+      if (lineDataString && this.promptEndMarker) {
+
+        fsAsyncStub.setProxy({
+          readdir: async (cwd: string, options: { withFileTypes: true }) => {
+            const res = await this.diskFileProvider.readDirectory(Uri.file(cwd));
+            const files = res.map(([name, type]) => ({
+              name,
+              isFile: () => type === FileType.File,
+              isDirectory: () => type === FileType.Directory,
+            }));
+            console.log('readdir', cwd, options, res, files);
+            return files;
+          },
+
+        });
+
+        const suggestionBlob = await getSuggestions(
+          lineDataString,
+          '/home/admin/retrox.jcy/cloud-ide/api-server',
+        );
+
+        console.log(
+          'suggestionBlob',
+          suggestionBlob,
+          'lineDataString',
+          JSON.stringify(lineDataString),
+        );
+        this.promptEndDecoration?.dispose();
+
+        if (suggestionBlob && suggestionBlob.suggestions) {
+          this.lastPromptLineString = JSON.stringify(lineDataString);
+          this.promptEndDecoration = xterm.registerDecoration({
+            marker: this.promptEndMarker,
+            width: 1,
+            // backgroundColor: '#2472C8',
+            height: 1,
+            x: cursorX,
+          });
+          console.log('render terminal intell react component');
+          const suggestionsViewModel = [...suggestionBlob.suggestions.map((suggestion) => ({
+            description: suggestion.description || '',
+            command: suggestion.name,
+          }))];
+          this.promptEndDecoration?.onRender((element) => {
+            ReactDOM.render(
+              <TerminalIntellCommandController
+                controller={this.controlEmitter}
+                suggestions={suggestionsViewModel}
+                onSuggestion={(suggestion) => {
+                  const dropCharNum = suggestionBlob.charactersToDrop || 0;
+                  const insertStr = suggestion.substring(dropCharNum);
+                  connection.sendData(insertStr);
+                  this.promptEndDecoration?.dispose();
+                }}
+              />,
+              element,
+            );
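+            // Once the list is rendered into the decoration element, mark it active so Enter/Tab/arrow input is routed to it instead of the pty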
+ this.isShellIntellActive = true; + }); + this.promptEndDecoration?.onDispose(() => { + if (this.promptEndDecoration?.element) { + this.isShellIntellActive = false; + console.log('dispose react component'); + ReactDOM.unmountComponentAtNode(this.promptEndDecoration?.element); + } + }); + } + } + console.timeEnd('Term onData'); + }); + } +} diff --git a/packages/terminal-next/src/browser/intell/runtime/bundle.js b/packages/terminal-next/src/browser/intell/runtime/bundle.js new file mode 100644 index 0000000000..cc6a8f442c --- /dev/null +++ b/packages/terminal-next/src/browser/intell/runtime/bundle.js @@ -0,0 +1,1179 @@ +/* eslint-disable */ +/* + * ATTENTION: The "eval" devtool has been used (maybe by default in mode: "development"). + * This devtool is neither made for production nor for readable output files. + * It uses "eval()" calls to create a separate source file in the browser devtools. + * If you are trying to read the output file, select a different devtool (https://webpack.js.org/configuration/devtool/) + * or disable the default devtool with "devtool: false". + * If you are looking for production-ready output files, see mode: "production" (https://webpack.js.org/configuration/mode/). + */ +(function webpackUniversalModuleDefinition(root, factory) { + if(typeof exports === 'object' && typeof module === 'object') + module.exports = factory(); + else if(typeof define === 'function' && define.amd) + define([], factory); + else if(typeof exports === 'object') + exports["SuggestionsBundle"] = factory(); + else + root["SuggestionsBundle"] = factory(); +})(this, () => { +return /******/ (() => { // webpackBootstrap +/******/ "use strict"; +/******/ var __webpack_modules__ = ({ + +/***/ "./src/index.js": +/*!**********************!*\ + !*** ./src/index.js ***! + \**********************/ +/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => { + +eval("__webpack_require__.r(__webpack_exports__);\n/* harmony export */ __webpack_require__.d(__webpack_exports__, {\n/* harmony export */ \"default\": () => (__WEBPACK_DEFAULT_EXPORT__)\n/* harmony export */ });\n/* harmony import */ var _withfig_autocomplete_build_ansible_doc_js__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! @withfig/autocomplete/build/ansible-doc.js */ \"./node_modules/@withfig/autocomplete/build/ansible-doc.js\");\n/* harmony import */ var _withfig_autocomplete_build_ansible_galaxy_js__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! @withfig/autocomplete/build/ansible-galaxy.js */ \"./node_modules/@withfig/autocomplete/build/ansible-galaxy.js\");\n/* harmony import */ var _withfig_autocomplete_build_ansible_playbook_js__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(/*! @withfig/autocomplete/build/ansible-playbook.js */ \"./node_modules/@withfig/autocomplete/build/ansible-playbook.js\");\n/* harmony import */ var _withfig_autocomplete_build_ansible_js__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(/*! @withfig/autocomplete/build/ansible.js */ \"./node_modules/@withfig/autocomplete/build/ansible.js\");\n/* harmony import */ var _withfig_autocomplete_build_aws_js__WEBPACK_IMPORTED_MODULE_4__ = __webpack_require__(/*! @withfig/autocomplete/build/aws.js */ \"./node_modules/@withfig/autocomplete/build/aws.js\");\n/* harmony import */ var _withfig_autocomplete_build_bundle_js__WEBPACK_IMPORTED_MODULE_5__ = __webpack_require__(/*! 
@withfig/autocomplete/build/bundle.js */ \"./node_modules/@withfig/autocomplete/build/bundle.js\");\n/* harmony import */ var _withfig_autocomplete_build_cargo_js__WEBPACK_IMPORTED_MODULE_6__ = __webpack_require__(/*! @withfig/autocomplete/build/cargo.js */ \"./node_modules/@withfig/autocomplete/build/cargo.js\");\n/* harmony import */ var _withfig_autocomplete_build_cat_js__WEBPACK_IMPORTED_MODULE_7__ = __webpack_require__(/*! @withfig/autocomplete/build/cat.js */ \"./node_modules/@withfig/autocomplete/build/cat.js\");\n/* harmony import */ var _withfig_autocomplete_build_cd_js__WEBPACK_IMPORTED_MODULE_8__ = __webpack_require__(/*! @withfig/autocomplete/build/cd.js */ \"./node_modules/@withfig/autocomplete/build/cd.js\");\n/* harmony import */ var _withfig_autocomplete_build_chmod_js__WEBPACK_IMPORTED_MODULE_9__ = __webpack_require__(/*! @withfig/autocomplete/build/chmod.js */ \"./node_modules/@withfig/autocomplete/build/chmod.js\");\n/* harmony import */ var _withfig_autocomplete_build_chown_js__WEBPACK_IMPORTED_MODULE_10__ = __webpack_require__(/*! @withfig/autocomplete/build/chown.js */ \"./node_modules/@withfig/autocomplete/build/chown.js\");\n/* harmony import */ var _withfig_autocomplete_build_circleci_js__WEBPACK_IMPORTED_MODULE_11__ = __webpack_require__(/*! @withfig/autocomplete/build/circleci.js */ \"./node_modules/@withfig/autocomplete/build/circleci.js\");\n/* harmony import */ var _withfig_autocomplete_build_clang_js__WEBPACK_IMPORTED_MODULE_12__ = __webpack_require__(/*! @withfig/autocomplete/build/clang.js */ \"./node_modules/@withfig/autocomplete/build/clang.js\");\n/* harmony import */ var _withfig_autocomplete_build_crontab_js__WEBPACK_IMPORTED_MODULE_13__ = __webpack_require__(/*! @withfig/autocomplete/build/crontab.js */ \"./node_modules/@withfig/autocomplete/build/crontab.js\");\n/* harmony import */ var _withfig_autocomplete_build_curl_js__WEBPACK_IMPORTED_MODULE_14__ = __webpack_require__(/*! @withfig/autocomplete/build/curl.js */ \"./node_modules/@withfig/autocomplete/build/curl.js\");\n/* harmony import */ var _withfig_autocomplete_build_cut_js__WEBPACK_IMPORTED_MODULE_15__ = __webpack_require__(/*! @withfig/autocomplete/build/cut.js */ \"./node_modules/@withfig/autocomplete/build/cut.js\");\n/* harmony import */ var _withfig_autocomplete_build_df_js__WEBPACK_IMPORTED_MODULE_16__ = __webpack_require__(/*! @withfig/autocomplete/build/df.js */ \"./node_modules/@withfig/autocomplete/build/df.js\");\n/* harmony import */ var _withfig_autocomplete_build_diff_js__WEBPACK_IMPORTED_MODULE_17__ = __webpack_require__(/*! @withfig/autocomplete/build/diff.js */ \"./node_modules/@withfig/autocomplete/build/diff.js\");\n/* harmony import */ var _withfig_autocomplete_build_dig_js__WEBPACK_IMPORTED_MODULE_18__ = __webpack_require__(/*! @withfig/autocomplete/build/dig.js */ \"./node_modules/@withfig/autocomplete/build/dig.js\");\n/* harmony import */ var _withfig_autocomplete_build_docker_compose_js__WEBPACK_IMPORTED_MODULE_19__ = __webpack_require__(/*! @withfig/autocomplete/build/docker-compose.js */ \"./node_modules/@withfig/autocomplete/build/docker-compose.js\");\n/* harmony import */ var _withfig_autocomplete_build_docker_js__WEBPACK_IMPORTED_MODULE_20__ = __webpack_require__(/*! @withfig/autocomplete/build/docker.js */ \"./node_modules/@withfig/autocomplete/build/docker.js\");\n/* harmony import */ var _withfig_autocomplete_build_dotnet_js__WEBPACK_IMPORTED_MODULE_21__ = __webpack_require__(/*! 
@withfig/autocomplete/build/dotnet.js */ \"./node_modules/@withfig/autocomplete/build/dotnet.js\");\n/* harmony import */ var _withfig_autocomplete_build_du_js__WEBPACK_IMPORTED_MODULE_22__ = __webpack_require__(/*! @withfig/autocomplete/build/du.js */ \"./node_modules/@withfig/autocomplete/build/du.js\");\n/* harmony import */ var _withfig_autocomplete_build_echo_js__WEBPACK_IMPORTED_MODULE_23__ = __webpack_require__(/*! @withfig/autocomplete/build/echo.js */ \"./node_modules/@withfig/autocomplete/build/echo.js\");\n/* harmony import */ var _withfig_autocomplete_build_eleventy_js__WEBPACK_IMPORTED_MODULE_24__ = __webpack_require__(/*! @withfig/autocomplete/build/eleventy.js */ \"./node_modules/@withfig/autocomplete/build/eleventy.js\");\n/* harmony import */ var _withfig_autocomplete_build_env_js__WEBPACK_IMPORTED_MODULE_25__ = __webpack_require__(/*! @withfig/autocomplete/build/env.js */ \"./node_modules/@withfig/autocomplete/build/env.js\");\n/* harmony import */ var _withfig_autocomplete_build_export_js__WEBPACK_IMPORTED_MODULE_26__ = __webpack_require__(/*! @withfig/autocomplete/build/export.js */ \"./node_modules/@withfig/autocomplete/build/export.js\");\n/* harmony import */ var _withfig_autocomplete_build_ffmpeg_js__WEBPACK_IMPORTED_MODULE_27__ = __webpack_require__(/*! @withfig/autocomplete/build/ffmpeg.js */ \"./node_modules/@withfig/autocomplete/build/ffmpeg.js\");\n/* harmony import */ var _withfig_autocomplete_build_find_js__WEBPACK_IMPORTED_MODULE_28__ = __webpack_require__(/*! @withfig/autocomplete/build/find.js */ \"./node_modules/@withfig/autocomplete/build/find.js\");\n/* harmony import */ var _withfig_autocomplete_build_firebase_js__WEBPACK_IMPORTED_MODULE_29__ = __webpack_require__(/*! @withfig/autocomplete/build/firebase.js */ \"./node_modules/@withfig/autocomplete/build/firebase.js\");\n/* harmony import */ var _withfig_autocomplete_build_g_js__WEBPACK_IMPORTED_MODULE_30__ = __webpack_require__(/*! @withfig/autocomplete/build/g++.js */ \"./node_modules/@withfig/autocomplete/build/g++.js\");\n/* harmony import */ var _withfig_autocomplete_build_gatsby_js__WEBPACK_IMPORTED_MODULE_31__ = __webpack_require__(/*! @withfig/autocomplete/build/gatsby.js */ \"./node_modules/@withfig/autocomplete/build/gatsby.js\");\n/* harmony import */ var _withfig_autocomplete_build_gcc_js__WEBPACK_IMPORTED_MODULE_32__ = __webpack_require__(/*! @withfig/autocomplete/build/gcc.js */ \"./node_modules/@withfig/autocomplete/build/gcc.js\");\n/* harmony import */ var _withfig_autocomplete_build_gcloud_js__WEBPACK_IMPORTED_MODULE_33__ = __webpack_require__(/*! @withfig/autocomplete/build/gcloud.js */ \"./node_modules/@withfig/autocomplete/build/gcloud.js\");\n/* harmony import */ var _withfig_autocomplete_build_gem_js__WEBPACK_IMPORTED_MODULE_34__ = __webpack_require__(/*! @withfig/autocomplete/build/gem.js */ \"./node_modules/@withfig/autocomplete/build/gem.js\");\n/* harmony import */ var _withfig_autocomplete_build_git_js__WEBPACK_IMPORTED_MODULE_35__ = __webpack_require__(/*! @withfig/autocomplete/build/git.js */ \"./node_modules/@withfig/autocomplete/build/git.js\");\n/* harmony import */ var _withfig_autocomplete_build_go_js__WEBPACK_IMPORTED_MODULE_36__ = __webpack_require__(/*! @withfig/autocomplete/build/go.js */ \"./node_modules/@withfig/autocomplete/build/go.js\");\n/* harmony import */ var _withfig_autocomplete_build_gpg_js__WEBPACK_IMPORTED_MODULE_37__ = __webpack_require__(/*! 
@withfig/autocomplete/build/gpg.js */ \"./node_modules/@withfig/autocomplete/build/gpg.js\");\n/* harmony import */ var _withfig_autocomplete_build_gradle_js__WEBPACK_IMPORTED_MODULE_38__ = __webpack_require__(/*! @withfig/autocomplete/build/gradle.js */ \"./node_modules/@withfig/autocomplete/build/gradle.js\");\n/* harmony import */ var _withfig_autocomplete_build_grep_js__WEBPACK_IMPORTED_MODULE_39__ = __webpack_require__(/*! @withfig/autocomplete/build/grep.js */ \"./node_modules/@withfig/autocomplete/build/grep.js\");\n/* harmony import */ var _withfig_autocomplete_build_head_js__WEBPACK_IMPORTED_MODULE_40__ = __webpack_require__(/*! @withfig/autocomplete/build/head.js */ \"./node_modules/@withfig/autocomplete/build/head.js\");\n/* harmony import */ var _withfig_autocomplete_build_helm_js__WEBPACK_IMPORTED_MODULE_41__ = __webpack_require__(/*! @withfig/autocomplete/build/helm.js */ \"./node_modules/@withfig/autocomplete/build/helm.js\");\n/* harmony import */ var _withfig_autocomplete_build_hexo_js__WEBPACK_IMPORTED_MODULE_42__ = __webpack_require__(/*! @withfig/autocomplete/build/hexo.js */ \"./node_modules/@withfig/autocomplete/build/hexo.js\");\n/* harmony import */ var _withfig_autocomplete_build_htop_js__WEBPACK_IMPORTED_MODULE_43__ = __webpack_require__(/*! @withfig/autocomplete/build/htop.js */ \"./node_modules/@withfig/autocomplete/build/htop.js\");\n/* harmony import */ var _withfig_autocomplete_build_hugo_js__WEBPACK_IMPORTED_MODULE_44__ = __webpack_require__(/*! @withfig/autocomplete/build/hugo.js */ \"./node_modules/@withfig/autocomplete/build/hugo.js\");\n/* harmony import */ var _withfig_autocomplete_build_java_js__WEBPACK_IMPORTED_MODULE_45__ = __webpack_require__(/*! @withfig/autocomplete/build/java.js */ \"./node_modules/@withfig/autocomplete/build/java.js\");\n/* harmony import */ var _withfig_autocomplete_build_jest_js__WEBPACK_IMPORTED_MODULE_46__ = __webpack_require__(/*! @withfig/autocomplete/build/jest.js */ \"./node_modules/@withfig/autocomplete/build/jest.js\");\n/* harmony import */ var _withfig_autocomplete_build_kill_js__WEBPACK_IMPORTED_MODULE_47__ = __webpack_require__(/*! @withfig/autocomplete/build/kill.js */ \"./node_modules/@withfig/autocomplete/build/kill.js\");\n/* harmony import */ var _withfig_autocomplete_build_kind_js__WEBPACK_IMPORTED_MODULE_48__ = __webpack_require__(/*! @withfig/autocomplete/build/kind.js */ \"./node_modules/@withfig/autocomplete/build/kind.js\");\n/* harmony import */ var _withfig_autocomplete_build_kubectl_js__WEBPACK_IMPORTED_MODULE_49__ = __webpack_require__(/*! @withfig/autocomplete/build/kubectl.js */ \"./node_modules/@withfig/autocomplete/build/kubectl.js\");\n/* harmony import */ var _withfig_autocomplete_build_less_js__WEBPACK_IMPORTED_MODULE_50__ = __webpack_require__(/*! @withfig/autocomplete/build/less.js */ \"./node_modules/@withfig/autocomplete/build/less.js\");\n/* harmony import */ var _withfig_autocomplete_build_ls_js__WEBPACK_IMPORTED_MODULE_51__ = __webpack_require__(/*! @withfig/autocomplete/build/ls.js */ \"./node_modules/@withfig/autocomplete/build/ls.js\");\n/* harmony import */ var _withfig_autocomplete_build_make_js__WEBPACK_IMPORTED_MODULE_52__ = __webpack_require__(/*! @withfig/autocomplete/build/make.js */ \"./node_modules/@withfig/autocomplete/build/make.js\");\n/* harmony import */ var _withfig_autocomplete_build_minikube_js__WEBPACK_IMPORTED_MODULE_53__ = __webpack_require__(/*! 
@withfig/autocomplete/build/minikube.js */ \"./node_modules/@withfig/autocomplete/build/minikube.js\");\n/* harmony import */ var _withfig_autocomplete_build_more_js__WEBPACK_IMPORTED_MODULE_54__ = __webpack_require__(/*! @withfig/autocomplete/build/more.js */ \"./node_modules/@withfig/autocomplete/build/more.js\");\n/* harmony import */ var _withfig_autocomplete_build_mount_js__WEBPACK_IMPORTED_MODULE_55__ = __webpack_require__(/*! @withfig/autocomplete/build/mount.js */ \"./node_modules/@withfig/autocomplete/build/mount.js\");\n/* harmony import */ var _withfig_autocomplete_build_mvn_js__WEBPACK_IMPORTED_MODULE_56__ = __webpack_require__(/*! @withfig/autocomplete/build/mvn.js */ \"./node_modules/@withfig/autocomplete/build/mvn.js\");\n/* harmony import */ var _withfig_autocomplete_build_nano_js__WEBPACK_IMPORTED_MODULE_57__ = __webpack_require__(/*! @withfig/autocomplete/build/nano.js */ \"./node_modules/@withfig/autocomplete/build/nano.js\");\n/* harmony import */ var _withfig_autocomplete_build_nc_js__WEBPACK_IMPORTED_MODULE_58__ = __webpack_require__(/*! @withfig/autocomplete/build/nc.js */ \"./node_modules/@withfig/autocomplete/build/nc.js\");\n/* harmony import */ var _withfig_autocomplete_build_netlify_js__WEBPACK_IMPORTED_MODULE_59__ = __webpack_require__(/*! @withfig/autocomplete/build/netlify.js */ \"./node_modules/@withfig/autocomplete/build/netlify.js\");\n/* harmony import */ var _withfig_autocomplete_build_node_js__WEBPACK_IMPORTED_MODULE_60__ = __webpack_require__(/*! @withfig/autocomplete/build/node.js */ \"./node_modules/@withfig/autocomplete/build/node.js\");\n/* harmony import */ var _withfig_autocomplete_build_npm_js__WEBPACK_IMPORTED_MODULE_61__ = __webpack_require__(/*! @withfig/autocomplete/build/npm.js */ \"./node_modules/@withfig/autocomplete/build/npm.js\");\n/* harmony import */ var _withfig_autocomplete_build_nvm_js__WEBPACK_IMPORTED_MODULE_62__ = __webpack_require__(/*! @withfig/autocomplete/build/nvm.js */ \"./node_modules/@withfig/autocomplete/build/nvm.js\");\n/* harmony import */ var _withfig_autocomplete_build_pandoc_js__WEBPACK_IMPORTED_MODULE_63__ = __webpack_require__(/*! @withfig/autocomplete/build/pandoc.js */ \"./node_modules/@withfig/autocomplete/build/pandoc.js\");\n/* harmony import */ var _withfig_autocomplete_build_php_js__WEBPACK_IMPORTED_MODULE_64__ = __webpack_require__(/*! @withfig/autocomplete/build/php.js */ \"./node_modules/@withfig/autocomplete/build/php.js\");\n/* harmony import */ var _withfig_autocomplete_build_ping_js__WEBPACK_IMPORTED_MODULE_65__ = __webpack_require__(/*! @withfig/autocomplete/build/ping.js */ \"./node_modules/@withfig/autocomplete/build/ping.js\");\n/* harmony import */ var _withfig_autocomplete_build_pip_js__WEBPACK_IMPORTED_MODULE_66__ = __webpack_require__(/*! @withfig/autocomplete/build/pip.js */ \"./node_modules/@withfig/autocomplete/build/pip.js\");\n/* harmony import */ var _withfig_autocomplete_build_pip3_js__WEBPACK_IMPORTED_MODULE_67__ = __webpack_require__(/*! @withfig/autocomplete/build/pip3.js */ \"./node_modules/@withfig/autocomplete/build/pip3.js\");\n/* harmony import */ var _withfig_autocomplete_build_playwright_js__WEBPACK_IMPORTED_MODULE_68__ = __webpack_require__(/*! @withfig/autocomplete/build/playwright.js */ \"./node_modules/@withfig/autocomplete/build/playwright.js\");\n/* harmony import */ var _withfig_autocomplete_build_podman_js__WEBPACK_IMPORTED_MODULE_69__ = __webpack_require__(/*! 
@withfig/autocomplete/build/podman.js */ \"./node_modules/@withfig/autocomplete/build/podman.js\");\n/* harmony import */ var _withfig_autocomplete_build_ps_js__WEBPACK_IMPORTED_MODULE_70__ = __webpack_require__(/*! @withfig/autocomplete/build/ps.js */ \"./node_modules/@withfig/autocomplete/build/ps.js\");\n/* harmony import */ var _withfig_autocomplete_build_python_js__WEBPACK_IMPORTED_MODULE_71__ = __webpack_require__(/*! @withfig/autocomplete/build/python.js */ \"./node_modules/@withfig/autocomplete/build/python.js\");\n/* harmony import */ var _withfig_autocomplete_build_python3_js__WEBPACK_IMPORTED_MODULE_72__ = __webpack_require__(/*! @withfig/autocomplete/build/python3.js */ \"./node_modules/@withfig/autocomplete/build/python3.js\");\n/* harmony import */ var _withfig_autocomplete_build_rclone_js__WEBPACK_IMPORTED_MODULE_73__ = __webpack_require__(/*! @withfig/autocomplete/build/rclone.js */ \"./node_modules/@withfig/autocomplete/build/rclone.js\");\n/* harmony import */ var _withfig_autocomplete_build_rsync_js__WEBPACK_IMPORTED_MODULE_74__ = __webpack_require__(/*! @withfig/autocomplete/build/rsync.js */ \"./node_modules/@withfig/autocomplete/build/rsync.js\");\n/* harmony import */ var _withfig_autocomplete_build_ruby_js__WEBPACK_IMPORTED_MODULE_75__ = __webpack_require__(/*! @withfig/autocomplete/build/ruby.js */ \"./node_modules/@withfig/autocomplete/build/ruby.js\");\n/* harmony import */ var _withfig_autocomplete_build_rustc_js__WEBPACK_IMPORTED_MODULE_76__ = __webpack_require__(/*! @withfig/autocomplete/build/rustc.js */ \"./node_modules/@withfig/autocomplete/build/rustc.js\");\n/* harmony import */ var _withfig_autocomplete_build_scp_js__WEBPACK_IMPORTED_MODULE_77__ = __webpack_require__(/*! @withfig/autocomplete/build/scp.js */ \"./node_modules/@withfig/autocomplete/build/scp.js\");\n/* harmony import */ var _withfig_autocomplete_build_screen_js__WEBPACK_IMPORTED_MODULE_78__ = __webpack_require__(/*! @withfig/autocomplete/build/screen.js */ \"./node_modules/@withfig/autocomplete/build/screen.js\");\n/* harmony import */ var _withfig_autocomplete_build_sed_js__WEBPACK_IMPORTED_MODULE_79__ = __webpack_require__(/*! @withfig/autocomplete/build/sed.js */ \"./node_modules/@withfig/autocomplete/build/sed.js\");\n/* harmony import */ var _withfig_autocomplete_build_sftp_js__WEBPACK_IMPORTED_MODULE_80__ = __webpack_require__(/*! @withfig/autocomplete/build/sftp.js */ \"./node_modules/@withfig/autocomplete/build/sftp.js\");\n/* harmony import */ var _withfig_autocomplete_build_sort_js__WEBPACK_IMPORTED_MODULE_81__ = __webpack_require__(/*! @withfig/autocomplete/build/sort.js */ \"./node_modules/@withfig/autocomplete/build/sort.js\");\n/* harmony import */ var _withfig_autocomplete_build_ssh_keygen_js__WEBPACK_IMPORTED_MODULE_82__ = __webpack_require__(/*! @withfig/autocomplete/build/ssh-keygen.js */ \"./node_modules/@withfig/autocomplete/build/ssh-keygen.js\");\n/* harmony import */ var _withfig_autocomplete_build_ssh_js__WEBPACK_IMPORTED_MODULE_83__ = __webpack_require__(/*! @withfig/autocomplete/build/ssh.js */ \"./node_modules/@withfig/autocomplete/build/ssh.js\");\n/* harmony import */ var _withfig_autocomplete_build_swift_js__WEBPACK_IMPORTED_MODULE_84__ = __webpack_require__(/*! @withfig/autocomplete/build/swift.js */ \"./node_modules/@withfig/autocomplete/build/swift.js\");\n/* harmony import */ var _withfig_autocomplete_build_systemctl_js__WEBPACK_IMPORTED_MODULE_85__ = __webpack_require__(/*! 
@withfig/autocomplete/build/systemctl.js */ \"./node_modules/@withfig/autocomplete/build/systemctl.js\");\n/* harmony import */ var _withfig_autocomplete_build_tac_js__WEBPACK_IMPORTED_MODULE_86__ = __webpack_require__(/*! @withfig/autocomplete/build/tac.js */ \"./node_modules/@withfig/autocomplete/build/tac.js\");\n/* harmony import */ var _withfig_autocomplete_build_tail_js__WEBPACK_IMPORTED_MODULE_87__ = __webpack_require__(/*! @withfig/autocomplete/build/tail.js */ \"./node_modules/@withfig/autocomplete/build/tail.js\");\n/* harmony import */ var _withfig_autocomplete_build_tar_js__WEBPACK_IMPORTED_MODULE_88__ = __webpack_require__(/*! @withfig/autocomplete/build/tar.js */ \"./node_modules/@withfig/autocomplete/build/tar.js\");\n/* harmony import */ var _withfig_autocomplete_build_terraform_js__WEBPACK_IMPORTED_MODULE_89__ = __webpack_require__(/*! @withfig/autocomplete/build/terraform.js */ \"./node_modules/@withfig/autocomplete/build/terraform.js\");\n/* harmony import */ var _withfig_autocomplete_build_tmux_js__WEBPACK_IMPORTED_MODULE_90__ = __webpack_require__(/*! @withfig/autocomplete/build/tmux.js */ \"./node_modules/@withfig/autocomplete/build/tmux.js\");\n/* harmony import */ var _withfig_autocomplete_build_top_js__WEBPACK_IMPORTED_MODULE_91__ = __webpack_require__(/*! @withfig/autocomplete/build/top.js */ \"./node_modules/@withfig/autocomplete/build/top.js\");\n/* harmony import */ var _withfig_autocomplete_build_tree_js__WEBPACK_IMPORTED_MODULE_92__ = __webpack_require__(/*! @withfig/autocomplete/build/tree.js */ \"./node_modules/@withfig/autocomplete/build/tree.js\");\n/* harmony import */ var _withfig_autocomplete_build_uname_js__WEBPACK_IMPORTED_MODULE_93__ = __webpack_require__(/*! @withfig/autocomplete/build/uname.js */ \"./node_modules/@withfig/autocomplete/build/uname.js\");\n/* harmony import */ var _withfig_autocomplete_build_uniq_js__WEBPACK_IMPORTED_MODULE_94__ = __webpack_require__(/*! @withfig/autocomplete/build/uniq.js */ \"./node_modules/@withfig/autocomplete/build/uniq.js\");\n/* harmony import */ var _withfig_autocomplete_build_unset_js__WEBPACK_IMPORTED_MODULE_95__ = __webpack_require__(/*! @withfig/autocomplete/build/unset.js */ \"./node_modules/@withfig/autocomplete/build/unset.js\");\n/* harmony import */ var _withfig_autocomplete_build_unzip_js__WEBPACK_IMPORTED_MODULE_96__ = __webpack_require__(/*! @withfig/autocomplete/build/unzip.js */ \"./node_modules/@withfig/autocomplete/build/unzip.js\");\n/* harmony import */ var _withfig_autocomplete_build_vercel_js__WEBPACK_IMPORTED_MODULE_97__ = __webpack_require__(/*! @withfig/autocomplete/build/vercel.js */ \"./node_modules/@withfig/autocomplete/build/vercel.js\");\n/* harmony import */ var _withfig_autocomplete_build_vi_js__WEBPACK_IMPORTED_MODULE_98__ = __webpack_require__(/*! @withfig/autocomplete/build/vi.js */ \"./node_modules/@withfig/autocomplete/build/vi.js\");\n/* harmony import */ var _withfig_autocomplete_build_vim_js__WEBPACK_IMPORTED_MODULE_99__ = __webpack_require__(/*! @withfig/autocomplete/build/vim.js */ \"./node_modules/@withfig/autocomplete/build/vim.js\");\n/* harmony import */ var _withfig_autocomplete_build_vue_js__WEBPACK_IMPORTED_MODULE_100__ = __webpack_require__(/*! @withfig/autocomplete/build/vue.js */ \"./node_modules/@withfig/autocomplete/build/vue.js\");\n/* harmony import */ var _withfig_autocomplete_build_w_js__WEBPACK_IMPORTED_MODULE_101__ = __webpack_require__(/*! 
@withfig/autocomplete/build/w.js */ \"./node_modules/@withfig/autocomplete/build/w.js\");\n/* harmony import */ var _withfig_autocomplete_build_wc_js__WEBPACK_IMPORTED_MODULE_102__ = __webpack_require__(/*! @withfig/autocomplete/build/wc.js */ \"./node_modules/@withfig/autocomplete/build/wc.js\");\n/* harmony import */ var _withfig_autocomplete_build_wget_js__WEBPACK_IMPORTED_MODULE_103__ = __webpack_require__(/*! @withfig/autocomplete/build/wget.js */ \"./node_modules/@withfig/autocomplete/build/wget.js\");\n/* harmony import */ var _withfig_autocomplete_build_whereis_js__WEBPACK_IMPORTED_MODULE_104__ = __webpack_require__(/*! @withfig/autocomplete/build/whereis.js */ \"./node_modules/@withfig/autocomplete/build/whereis.js\");\n/* harmony import */ var _withfig_autocomplete_build_which_js__WEBPACK_IMPORTED_MODULE_105__ = __webpack_require__(/*! @withfig/autocomplete/build/which.js */ \"./node_modules/@withfig/autocomplete/build/which.js\");\n/* harmony import */ var _withfig_autocomplete_build_who_js__WEBPACK_IMPORTED_MODULE_106__ = __webpack_require__(/*! @withfig/autocomplete/build/who.js */ \"./node_modules/@withfig/autocomplete/build/who.js\");\n/* harmony import */ var _withfig_autocomplete_build_yarn_js__WEBPACK_IMPORTED_MODULE_107__ = __webpack_require__(/*! @withfig/autocomplete/build/yarn.js */ \"./node_modules/@withfig/autocomplete/build/yarn.js\");\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n/* harmony default export */ const __WEBPACK_DEFAULT_EXPORT__ = ({\n 'ansible-doc': _withfig_autocomplete_build_ansible_doc_js__WEBPACK_IMPORTED_MODULE_0__[\"default\"],\n 'ansible-galaxy': _withfig_autocomplete_build_ansible_galaxy_js__WEBPACK_IMPORTED_MODULE_1__[\"default\"],\n 'ansible-playbook': _withfig_autocomplete_build_ansible_playbook_js__WEBPACK_IMPORTED_MODULE_2__[\"default\"],\n 'ansible': _withfig_autocomplete_build_ansible_js__WEBPACK_IMPORTED_MODULE_3__[\"default\"],\n 'aws': _withfig_autocomplete_build_aws_js__WEBPACK_IMPORTED_MODULE_4__[\"default\"],\n 'bundle': _withfig_autocomplete_build_bundle_js__WEBPACK_IMPORTED_MODULE_5__[\"default\"],\n 'cargo': _withfig_autocomplete_build_cargo_js__WEBPACK_IMPORTED_MODULE_6__[\"default\"],\n 'cat': _withfig_autocomplete_build_cat_js__WEBPACK_IMPORTED_MODULE_7__[\"default\"],\n 'cd': _withfig_autocomplete_build_cd_js__WEBPACK_IMPORTED_MODULE_8__[\"default\"],\n 'chmod': _withfig_autocomplete_build_chmod_js__WEBPACK_IMPORTED_MODULE_9__[\"default\"],\n 'chown': _withfig_autocomplete_build_chown_js__WEBPACK_IMPORTED_MODULE_10__[\"default\"],\n 'circleci': _withfig_autocomplete_build_circleci_js__WEBPACK_IMPORTED_MODULE_11__[\"default\"],\n 'clang': _withfig_autocomplete_build_clang_js__WEBPACK_IMPORTED_MODULE_12__[\"default\"],\n 'crontab': _withfig_autocomplete_build_crontab_js__WEBPACK_IMPORTED_MODULE_13__[\"default\"],\n 'curl': _withfig_autocomplete_build_curl_js__WEBPACK_IMPORTED_MODULE_14__[\"default\"],\n 'cut': _withfig_autocomplete_build_cut_js__WEBPACK_IMPORTED_MODULE_15__[\"default\"],\n 'df': _withfig_autocomplete_build_df_js__WEBPACK_IMPORTED_MODULE_16__[\"default\"],\n 'diff': _withfig_autocomplete_build_diff_js__WEBPACK_IMPORTED_MODULE_17__[\"default\"],\n 'dig': _withfig_autocomplete_build_dig_js__WEBPACK_IMPORTED_MODULE_18__[\"default\"],\n 'docker-compose': _withfig_autocomplete_build_docker_compose_js__WEBPACK_IMPORTED_MODULE_19__[\"default\"],\n 
'docker': _withfig_autocomplete_build_docker_js__WEBPACK_IMPORTED_MODULE_20__[\"default\"],\n 'dotnet': _withfig_autocomplete_build_dotnet_js__WEBPACK_IMPORTED_MODULE_21__[\"default\"],\n 'du': _withfig_autocomplete_build_du_js__WEBPACK_IMPORTED_MODULE_22__[\"default\"],\n 'echo': _withfig_autocomplete_build_echo_js__WEBPACK_IMPORTED_MODULE_23__[\"default\"],\n 'eleventy': _withfig_autocomplete_build_eleventy_js__WEBPACK_IMPORTED_MODULE_24__[\"default\"],\n 'env': _withfig_autocomplete_build_env_js__WEBPACK_IMPORTED_MODULE_25__[\"default\"],\n 'export': _withfig_autocomplete_build_export_js__WEBPACK_IMPORTED_MODULE_26__[\"default\"],\n 'ffmpeg': _withfig_autocomplete_build_ffmpeg_js__WEBPACK_IMPORTED_MODULE_27__[\"default\"],\n 'find': _withfig_autocomplete_build_find_js__WEBPACK_IMPORTED_MODULE_28__[\"default\"],\n 'firebase': _withfig_autocomplete_build_firebase_js__WEBPACK_IMPORTED_MODULE_29__[\"default\"],\n 'g++': _withfig_autocomplete_build_g_js__WEBPACK_IMPORTED_MODULE_30__[\"default\"],\n 'gatsby': _withfig_autocomplete_build_gatsby_js__WEBPACK_IMPORTED_MODULE_31__[\"default\"],\n 'gcc': _withfig_autocomplete_build_gcc_js__WEBPACK_IMPORTED_MODULE_32__[\"default\"],\n 'gcloud': _withfig_autocomplete_build_gcloud_js__WEBPACK_IMPORTED_MODULE_33__[\"default\"],\n 'gem': _withfig_autocomplete_build_gem_js__WEBPACK_IMPORTED_MODULE_34__[\"default\"],\n 'git': _withfig_autocomplete_build_git_js__WEBPACK_IMPORTED_MODULE_35__[\"default\"],\n 'go': _withfig_autocomplete_build_go_js__WEBPACK_IMPORTED_MODULE_36__[\"default\"],\n 'gpg': _withfig_autocomplete_build_gpg_js__WEBPACK_IMPORTED_MODULE_37__[\"default\"],\n 'gradle': _withfig_autocomplete_build_gradle_js__WEBPACK_IMPORTED_MODULE_38__[\"default\"],\n 'grep': _withfig_autocomplete_build_grep_js__WEBPACK_IMPORTED_MODULE_39__[\"default\"],\n 'head': _withfig_autocomplete_build_head_js__WEBPACK_IMPORTED_MODULE_40__[\"default\"],\n 'helm': _withfig_autocomplete_build_helm_js__WEBPACK_IMPORTED_MODULE_41__[\"default\"],\n 'hexo': _withfig_autocomplete_build_hexo_js__WEBPACK_IMPORTED_MODULE_42__[\"default\"],\n 'htop': _withfig_autocomplete_build_htop_js__WEBPACK_IMPORTED_MODULE_43__[\"default\"],\n 'hugo': _withfig_autocomplete_build_hugo_js__WEBPACK_IMPORTED_MODULE_44__[\"default\"],\n 'java': _withfig_autocomplete_build_java_js__WEBPACK_IMPORTED_MODULE_45__[\"default\"],\n 'jest': _withfig_autocomplete_build_jest_js__WEBPACK_IMPORTED_MODULE_46__[\"default\"],\n 'kill': _withfig_autocomplete_build_kill_js__WEBPACK_IMPORTED_MODULE_47__[\"default\"],\n 'kind': _withfig_autocomplete_build_kind_js__WEBPACK_IMPORTED_MODULE_48__[\"default\"],\n 'kubectl': _withfig_autocomplete_build_kubectl_js__WEBPACK_IMPORTED_MODULE_49__[\"default\"],\n 'less': _withfig_autocomplete_build_less_js__WEBPACK_IMPORTED_MODULE_50__[\"default\"],\n 'ls': _withfig_autocomplete_build_ls_js__WEBPACK_IMPORTED_MODULE_51__[\"default\"],\n 'make': _withfig_autocomplete_build_make_js__WEBPACK_IMPORTED_MODULE_52__[\"default\"],\n 'minikube': _withfig_autocomplete_build_minikube_js__WEBPACK_IMPORTED_MODULE_53__[\"default\"],\n 'more': _withfig_autocomplete_build_more_js__WEBPACK_IMPORTED_MODULE_54__[\"default\"],\n 'mount': _withfig_autocomplete_build_mount_js__WEBPACK_IMPORTED_MODULE_55__[\"default\"],\n 'mvn': _withfig_autocomplete_build_mvn_js__WEBPACK_IMPORTED_MODULE_56__[\"default\"],\n 'nano': _withfig_autocomplete_build_nano_js__WEBPACK_IMPORTED_MODULE_57__[\"default\"],\n 'nc': _withfig_autocomplete_build_nc_js__WEBPACK_IMPORTED_MODULE_58__[\"default\"],\n 'netlify': 
_withfig_autocomplete_build_netlify_js__WEBPACK_IMPORTED_MODULE_59__[\"default\"],\n 'node': _withfig_autocomplete_build_node_js__WEBPACK_IMPORTED_MODULE_60__[\"default\"],\n 'npm': _withfig_autocomplete_build_npm_js__WEBPACK_IMPORTED_MODULE_61__[\"default\"],\n 'nvm': _withfig_autocomplete_build_nvm_js__WEBPACK_IMPORTED_MODULE_62__[\"default\"],\n 'pandoc': _withfig_autocomplete_build_pandoc_js__WEBPACK_IMPORTED_MODULE_63__[\"default\"],\n 'php': _withfig_autocomplete_build_php_js__WEBPACK_IMPORTED_MODULE_64__[\"default\"],\n 'ping': _withfig_autocomplete_build_ping_js__WEBPACK_IMPORTED_MODULE_65__[\"default\"],\n 'pip': _withfig_autocomplete_build_pip_js__WEBPACK_IMPORTED_MODULE_66__[\"default\"],\n 'pip3': _withfig_autocomplete_build_pip3_js__WEBPACK_IMPORTED_MODULE_67__[\"default\"],\n 'playwright': _withfig_autocomplete_build_playwright_js__WEBPACK_IMPORTED_MODULE_68__[\"default\"],\n 'podman': _withfig_autocomplete_build_podman_js__WEBPACK_IMPORTED_MODULE_69__[\"default\"],\n 'ps': _withfig_autocomplete_build_ps_js__WEBPACK_IMPORTED_MODULE_70__[\"default\"],\n 'python': _withfig_autocomplete_build_python_js__WEBPACK_IMPORTED_MODULE_71__[\"default\"],\n 'python3': _withfig_autocomplete_build_python3_js__WEBPACK_IMPORTED_MODULE_72__[\"default\"],\n 'rclone': _withfig_autocomplete_build_rclone_js__WEBPACK_IMPORTED_MODULE_73__[\"default\"],\n 'rsync': _withfig_autocomplete_build_rsync_js__WEBPACK_IMPORTED_MODULE_74__[\"default\"],\n 'ruby': _withfig_autocomplete_build_ruby_js__WEBPACK_IMPORTED_MODULE_75__[\"default\"],\n 'rustc': _withfig_autocomplete_build_rustc_js__WEBPACK_IMPORTED_MODULE_76__[\"default\"],\n 'scp': _withfig_autocomplete_build_scp_js__WEBPACK_IMPORTED_MODULE_77__[\"default\"],\n 'screen': _withfig_autocomplete_build_screen_js__WEBPACK_IMPORTED_MODULE_78__[\"default\"],\n 'sed': _withfig_autocomplete_build_sed_js__WEBPACK_IMPORTED_MODULE_79__[\"default\"],\n 'sftp': _withfig_autocomplete_build_sftp_js__WEBPACK_IMPORTED_MODULE_80__[\"default\"],\n 'sort': _withfig_autocomplete_build_sort_js__WEBPACK_IMPORTED_MODULE_81__[\"default\"],\n 'ssh-keygen': _withfig_autocomplete_build_ssh_keygen_js__WEBPACK_IMPORTED_MODULE_82__[\"default\"],\n 'ssh': _withfig_autocomplete_build_ssh_js__WEBPACK_IMPORTED_MODULE_83__[\"default\"],\n 'swift': _withfig_autocomplete_build_swift_js__WEBPACK_IMPORTED_MODULE_84__[\"default\"],\n 'systemctl': _withfig_autocomplete_build_systemctl_js__WEBPACK_IMPORTED_MODULE_85__[\"default\"],\n 'tac': _withfig_autocomplete_build_tac_js__WEBPACK_IMPORTED_MODULE_86__[\"default\"],\n 'tail': _withfig_autocomplete_build_tail_js__WEBPACK_IMPORTED_MODULE_87__[\"default\"],\n 'tar': _withfig_autocomplete_build_tar_js__WEBPACK_IMPORTED_MODULE_88__[\"default\"],\n 'terraform': _withfig_autocomplete_build_terraform_js__WEBPACK_IMPORTED_MODULE_89__[\"default\"],\n 'tmux': _withfig_autocomplete_build_tmux_js__WEBPACK_IMPORTED_MODULE_90__[\"default\"],\n 'top': _withfig_autocomplete_build_top_js__WEBPACK_IMPORTED_MODULE_91__[\"default\"],\n 'tree': _withfig_autocomplete_build_tree_js__WEBPACK_IMPORTED_MODULE_92__[\"default\"],\n 'uname': _withfig_autocomplete_build_uname_js__WEBPACK_IMPORTED_MODULE_93__[\"default\"],\n 'uniq': _withfig_autocomplete_build_uniq_js__WEBPACK_IMPORTED_MODULE_94__[\"default\"],\n 'unset': _withfig_autocomplete_build_unset_js__WEBPACK_IMPORTED_MODULE_95__[\"default\"],\n 'unzip': _withfig_autocomplete_build_unzip_js__WEBPACK_IMPORTED_MODULE_96__[\"default\"],\n 'vercel': 
_withfig_autocomplete_build_vercel_js__WEBPACK_IMPORTED_MODULE_97__[\"default\"],\n 'vi': _withfig_autocomplete_build_vi_js__WEBPACK_IMPORTED_MODULE_98__[\"default\"],\n 'vim': _withfig_autocomplete_build_vim_js__WEBPACK_IMPORTED_MODULE_99__[\"default\"],\n 'vue': _withfig_autocomplete_build_vue_js__WEBPACK_IMPORTED_MODULE_100__[\"default\"],\n 'w': _withfig_autocomplete_build_w_js__WEBPACK_IMPORTED_MODULE_101__[\"default\"],\n 'wc': _withfig_autocomplete_build_wc_js__WEBPACK_IMPORTED_MODULE_102__[\"default\"],\n 'wget': _withfig_autocomplete_build_wget_js__WEBPACK_IMPORTED_MODULE_103__[\"default\"],\n 'whereis': _withfig_autocomplete_build_whereis_js__WEBPACK_IMPORTED_MODULE_104__[\"default\"],\n 'which': _withfig_autocomplete_build_which_js__WEBPACK_IMPORTED_MODULE_105__[\"default\"],\n 'who': _withfig_autocomplete_build_who_js__WEBPACK_IMPORTED_MODULE_106__[\"default\"],\n 'yarn': _withfig_autocomplete_build_yarn_js__WEBPACK_IMPORTED_MODULE_107__[\"default\"]\n});\n\n//# sourceURL=webpack://SuggestionsBundle/./src/index.js?"); + +/***/ }), + +/***/ "./node_modules/@withfig/autocomplete/build/ansible-doc.js": +/*!*****************************************************************!*\ + !*** ./node_modules/@withfig/autocomplete/build/ansible-doc.js ***! + \*****************************************************************/ +/***/ ((__unused_webpack___webpack_module__, __webpack_exports__, __webpack_require__) => { + +eval("__webpack_require__.r(__webpack_exports__);\n/* harmony export */ __webpack_require__.d(__webpack_exports__, {\n/* harmony export */ \"default\": () => (/* binding */ s)\n/* harmony export */ });\nvar i = {\n script: [\"ansible-doc\", \"--list\", \"--json\"],\n postProcess: function postProcess(o) {\n var e = JSON.parse(o);\n return Object.keys(e).map(n => ({\n name: n,\n description: e[n]\n }));\n }\n },\n t = {\n name: \"ansible-doc\",\n description: \"Displays information on modules installed in Ansible libraries\",\n options: [{\n name: \"--metadata-dump\",\n description: \"For internal testing only Dump json metadata for all plugins\",\n args: {\n isOptional: !0\n }\n }, {\n name: \"--playbook-dir\",\n description: \"Sets the relative path for many features including roles/ group_vars/ etc\",\n args: {\n name: \"BASEDIR\",\n description: \"Base directory\",\n template: [\"filepaths\"]\n }\n }, {\n name: \"--version\",\n description: \"Shows version number, config file location, module search path, module location, executable location and exit\",\n args: {\n isOptional: !0\n }\n }, {\n name: [\"--list_files\", \"-F\"],\n description: \"Show plugin names and their source files without summaries (implies --list)\",\n args: {\n isOptional: !0\n }\n }, {\n name: [\"--module-path\", \"-M\"],\n description: \"Prepend colon-separated path(s) to module library\",\n args: {\n isOptional: !0\n }\n }, {\n name: [\"--entry-point\", \"-E\"],\n description: \"Select the entry point for role(s)\",\n args: {\n name: \"ENTRY_POINT\"\n }\n }, {\n name: [\"--help\", \"-h\"],\n description: \"Show help and exit\",\n args: {\n isOptional: !0\n }\n }, {\n name: [\"--json\", \"-j\"],\n description: \"Change output into json format\",\n args: {\n isOptional: !0\n }\n }, {\n name: [\"--list\", \"-l\"],\n description: \"List available plugins; a supplied argument will be used for filtering (can be a namespace or full collection name)\",\n args: {\n name: \"NAMESPACE|COLLECTION\",\n isOptional: !0,\n isDangerous: !0\n }\n }, {\n name: [\"--roles-path\", \"-r\"],\n description: \"The path to the 
directory containing your roles\",\n args: {\n name: \"PATH\",\n template: [\"filepaths\"]\n }\n }, {\n name: [\"--snippet\", \"-s\"],\n description: \"Show playbook snippet for these plugin types: inventory, lookup, module\",\n args: {\n name: \"PLUGIN_TYPE\",\n suggestions: [\"inventory\", \"lookup\", \"module\"]\n }\n }, {\n name: [\"--type\", \"-t\"],\n description: 'Choose which plugin type (defaults to \"module\")',\n args: {\n name: \"PLUGIN_TYPE\",\n suggestions: [\"become\", \"cache\", \"callback\", \"cliconf\", \"connection\", \"httpapi\", \"inventory\", \"lookup\", \"netconf\", \"shell\", \"vars\", \"module\", \"strategy\", \"role\", \"keyword\"],\n default: \"module\"\n }\n }, {\n name: \"--verbose\",\n description: \"Verbose mode (-vvv for more, -vvvv to enable connection debugging)\",\n exclusiveOn: [\"-v\"],\n args: {\n isOptional: !0\n }\n }, {\n name: \"-v\",\n description: \"Verbose mode (-vvv for more, -vvvv to enable connection debugging)\",\n isRepeatable: 5,\n exclusiveOn: [\"--verbose\"],\n args: {\n isOptional: !0\n }\n }],\n args: {\n name: \"plugin\",\n isDangerous: !0,\n isVariadic: !0,\n generators: i\n }\n },\n s = t;\n\n\n//# sourceURL=webpack://SuggestionsBundle/./node_modules/@withfig/autocomplete/build/ansible-doc.js?"); + +/***/ }), + +/***/ "./node_modules/@withfig/autocomplete/build/ansible-galaxy.js": +/*!********************************************************************!*\ + !*** ./node_modules/@withfig/autocomplete/build/ansible-galaxy.js ***! + \********************************************************************/ +/***/ ((__unused_webpack___webpack_module__, __webpack_exports__, __webpack_require__) => { + +eval("__webpack_require__.r(__webpack_exports__);\n/* harmony export */ __webpack_require__.d(__webpack_exports__, {\n/* harmony export */ \"default\": () => (/* binding */ b)\n/* harmony export */ });\nvar e = [{\n name: [\"--server\", \"-s\"],\n description: \"The Galaxy API server URL\",\n args: {\n name: \"api_server\",\n description: \"The Galaxy API server URL\"\n }\n }, {\n name: [\"--token\", \"--api-key\"],\n description: \"The Ansible Galaxy API key which can be found at https://galaxy.ansible.com/me/preferences\",\n args: {\n name: \"api_key\",\n description: \"The Ansible Galaxy API key which can be found at https://galaxy.ansible.com/me/preferences\"\n }\n }, {\n name: [\"--ignore-certs\", \"-c\"],\n description: \"Ignore SSL certificate validation errors\"\n }],\n o = [{\n name: \"--clear-response-cache\",\n description: \"Clear the existing server response cache\"\n }, {\n name: \"--no-cache\",\n description: \"Do not use the server response cache\"\n }, {\n name: [\"--no-deps\", \"-n\"],\n description: \"Don't download collection(s) listed as dependencies\"\n }, {\n name: [\"--download-path\", \"-p\"],\n description: \"The directory to download the collections to\",\n args: {\n name: \"download_path\",\n description: \"The directory to download the collections to\",\n template: [\"folders\"]\n }\n }, {\n name: [\"--requirements-file\", \"-r\"],\n description: \"A file containing a list of collections to be downloaded\",\n args: {\n name: \"requirements\",\n description: \"A file containing a list of collections to be downloaded\",\n template: [\"filepaths\"]\n }\n }, {\n name: \"--pre\",\n description: \"Include pre-release versions\"\n }],\n t = [{\n name: [\"--force\", \"-f\"],\n description: \"Force overwriting an existing role or collection\"\n }, {\n name: \"--init-path\",\n description: \"The path in which the skeleton 
collection will be created\",\n args: {\n name: \"init_path\",\n description: \"The path in which the skeleton collection will be created\",\n template: [\"folders\"],\n default: \".\"\n }\n }, {\n name: \"--collection-skeleton\",\n description: \"The path to a collection skeleton that the new collection should be based upon\",\n args: {\n name: \"collection_skeleton\",\n description: \"The path to a collection skeleton that the new collection should be based upon\",\n template: [\"folders\"]\n }\n }],\n i = [{\n name: [\"--force\", \"-f\"],\n description: \"Force overwriting an existing role or collection\"\n }, {\n name: \"--output-path\",\n description: \"The path in which the collection is built to\",\n args: {\n name: \"output_path\",\n description: \"The path in which the collection is built to\",\n template: [\"folders\"],\n default: \".\"\n }\n }],\n n = [{\n name: \"--no-wait\",\n description: \"Don't wait for import validation results\"\n }, {\n name: \"--import-timeout\",\n description: \"The time to wait for the collection import process to finish\",\n args: {\n name: \"import_timeout\",\n description: \"The time to wait for the collection import process to finish\",\n suggestions: [\"60\", \"300\", \"600\", \"900\"],\n default: \"60\"\n }\n }],\n s = [{\n name: [\"--collections-path\", \"-p\"],\n description: \"One or more directories to search for collections in addition to the default COLLECTIONS_PATHS; separate multiple paths with ':'\",\n args: {\n name: \"collections_path\",\n description: \"One or more directories to search for collections in addition to the default COLLECTIONS_PATHS; separate multiple paths with ':'\",\n template: [\"folders\"],\n suggestions: [\"~/.ansible/collections:/usr/share/ansible/collections\"],\n default: \"~/.ansible/collections:/usr/share/ansible/collections\"\n }\n }, {\n name: \"--format\",\n description: \"Format to display the list of collections in\",\n args: {\n name: \"format\",\n description: \"Format to display the list of collections in\",\n suggestions: [\"human\", \"yaml\", \"json\"],\n default: \"human\"\n }\n }],\n r = [{\n name: [\"--collections-path\", \"-p\"],\n description: \"One or more directories to search for collections in addition to the default COLLECTIONS_PATHS; separate multiple paths with ':'\",\n args: {\n name: \"collections_path\",\n description: \"One or more directories to search for collections in addition to the default COLLECTIONS_PATHS; separate multiple paths with ':'\",\n template: [\"folders\"],\n suggestions: [\"~/.ansible/collections:/usr/share/ansible/collections\"],\n default: \"~/.ansible/collections:/usr/share/ansible/collections\"\n }\n }, {\n name: [\"--ignore-errors\", \"-i\"],\n description: \"Ignore errors during verification and continue with the next specified collection\"\n }, {\n name: [\"--requirements-file\", \"-r\"],\n description: \"A file containing a list of collections to be downloaded\",\n args: {\n name: \"requirements\",\n description: \"A file containing a list of collections to be downloaded\",\n template: [\"filepaths\"]\n }\n }],\n a = {\n name: \"collection\",\n description: \"Name of the collection\",\n isVariadic: !0,\n suggestions: [\"amazon.aws\", \"ansible.builtin\", \"ansible.netcommon\", \"ansible.posix\", \"ansible.utils\", \"ansible.windows\", \"arista.eos\", \"awx.awx\", \"azure.azcollection\", \"check_point.mgmt\", \"chocolatey.chocolatey\", \"cisco.aci\", \"cisco.asa\", \"cisco.intersight\", \"cisco.ios\", \"cisco.iosxr\", \"cisco.ise\", \"cisco.meraki\", 
\"cisco.mso\", \"cisco.nso\", \"cisco.nxos\", \"cisco.ucs\", \"cloud.common\", \"cloudscale_ch.cloud\", \"community.aws\", \"community.azure\", \"community.ciscosmb\", \"community.crypto\", \"community.digitalocean\", \"community.dns\", \"community.docker\", \"community.fortios\", \"community.general\", \"community.google\", \"community.grafana\", \"community.hashi_vault\", \"community.hrobot\", \"community.kubernetes\", \"community.kubevirt\", \"community.libvirt\", \"community.mongodb\", \"community.mysql\", \"community.network\", \"community.okd\", \"community.postgresql\", \"community.proxysql\", \"community.rabbitmq\", \"community.routeros\", \"community.sap\", \"community.skydive\", \"community.sops\", \"community.vmware\", \"community.windows\", \"community.zabbix\", \"containers.podman\", \"cyberark.conjur\", \"cyberark.pas\", \"dellemc.enterprise_sonic\", \"dellemc.openmanage\", \"dellemc.os10\", \"dellemc.os6\", \"dellemc.os9\", \"f5networks.f5_modules\", \"fortinet.fortimanager\", \"fortinet.fortios\", \"frr.frr\", \"gluster.gluster\", \"google.cloud\", \"hetzner.hcloud\", \"hpe.nimble\", \"ibm.qradar\", \"infinidat.infinibox\", \"infoblox.nios_modules\", \"inspur.sm\", \"junipernetworks.junos\", \"kubernetes.core\", \"mellanox.onyx\", \"netapp.aws\", \"netapp.azure\", \"netapp.cloudmanager\", \"netapp.elementsw\", \"netapp.ontap\", \"netapp.storagegrid\", \"netapp.um_info\", \"netapp_eseries.santricity\", \"netbox.netbox\", \"ngine_io.cloudstack\", \"ngine_io.exoscale\", \"ngine_io.vultr\", \"openstack.cloud\", \"openvswitch.openvswitch\", \"ovirt.ovirt\", \"purestorage.flasharray\", \"purestorage.flashblade\", \"sensu.sensu_go\", \"servicenow.servicenow\", \"splunk.es\", \"t_systems_mms.icinga_director\", \"theforeman.foreman\", \"vmware.vmware_rest\", \"vyos.vyos\", \"wti.remote\"]\n },\n l = [{\n name: [\"--force\", \"-f\"],\n description: \"Force overwriting an existing role or collection\"\n }, {\n name: \"--offline\",\n description: \"Don't query the galaxy API when creating roles\"\n }, {\n name: \"--init-path\",\n description: \"The path in which the skeleton collection will be created\",\n args: {\n name: \"init_path\",\n description: \"The path in which the skeleton collection will be created\",\n template: [\"folders\"],\n default: \".\"\n }\n }, {\n name: \"--role-skeleton\",\n description: \"The path in which the skeleton role will be created\",\n args: {\n name: \"role_skeleton\",\n description: \"The path in which the skeleton role will be created\",\n template: [\"folders\"],\n default: \".\"\n }\n }, {\n name: \"--type\",\n description: \"Initialize using an alternate role type\",\n args: {\n name: \"role_type\",\n description: \"Initialize using an alternate role type\",\n suggestions: [\"container\", \"apb\", \"network\"],\n default: \"container\"\n }\n }],\n c = [{\n name: [\"--roles-path\", \"--role-path\"],\n description: \"The path to the directory containing your roles\",\n args: {\n name: \"roles_path\",\n description: \"The path to the directory containing your roles\",\n suggestions: [\"~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles\"],\n default: \"~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles\"\n }\n }],\n p = [{\n name: [\"--roles-path\", \"--role-path\"],\n description: \"The path to the directory containing your roles\",\n args: {\n name: \"roles_path\",\n description: \"The path to the directory containing your roles\",\n suggestions: [\"~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles\"],\n default: 
\"~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles\"\n }\n }],\n m = [{\n name: \"--platforms\",\n description: \"List of OS platforms to filter by\",\n args: {\n name: \"platforms\",\n description: \"List of OS platforms to filter by\"\n }\n }, {\n name: \"--galaxy-tags\",\n description: \"List of Galaxy tags to filter by\",\n args: {\n name: \"galaxy_tags\",\n description: \"List of Galaxy tags to filter by\"\n }\n }, {\n name: \"--author\",\n description: \"GitHub username\",\n args: {\n name: \"author\",\n description: \"GitHub username\"\n }\n }],\n d = [{\n name: \"--branch\",\n description: \"The name of a branch to import. Defaults to the repository's default branch (usually master)\",\n args: {\n name: \"reference\",\n description: \"The name of a branch to import. Defaults to the repository's default branch (usually master)\",\n suggestions: [\"master\", \"main\", \"develop\"],\n default: \"master\"\n }\n }, {\n name: \"--role-name\",\n description: \"The name the role should have, if different than the repo name\",\n args: {\n name: \"role_name\",\n description: \"The name the role should have, if different than the repo name\"\n }\n }, {\n name: \"--status\",\n description: \"Check the status of the most recent import request for given github_user/github_repo\"\n }],\n h = [{\n name: [\"--roles-path\", \"--role-path\"],\n description: \"The path to the directory containing your roles\",\n args: {\n name: \"roles_path\",\n description: \"The path to the directory containing your roles\",\n suggestions: [\"~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles\"],\n default: \"~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles\"\n }\n }, {\n name: \"--remove\",\n description: \"Remove the integration matching the provided ID value. Use --list to see ID values\",\n args: {\n name: \"remove_id\",\n description: \"Remove the integration matching the provided ID value. 
Use --list to see ID values\"\n },\n exclusiveOn: [\"--list\"]\n }, {\n name: \"--list\",\n description: \"List all of your integrations\",\n exclusiveOn: [\"--remove\"]\n }],\n u = [{\n name: [\"--roles-path\", \"--role-path\"],\n description: \"The path to the directory containing your roles\",\n args: {\n name: \"roles_path\",\n description: \"The path to the directory containing your roles\",\n suggestions: [\"~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles\"],\n default: \"~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles\"\n }\n }, {\n name: \"--offline\",\n description: \"Don't query the galaxy API when creating roles\"\n }],\n g = [{\n name: [\"--roles-path\", \"--role-path\"],\n description: \"The path to the directory containing your roles\",\n args: {\n name: \"roles_path\",\n description: \"The path to the directory containing your roles\",\n suggestions: [\"~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles\"],\n default: \"~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles\"\n }\n }, {\n name: [\"--no-deps\", \"-n\"],\n description: \"Don't download roles listed as dependencies\"\n }, {\n name: \"--force-with-deps\",\n description: \"Force overwriting an existing role and its dependencies\"\n }, {\n name: [\"--requirements-file\", \"-r\"],\n description: \"A file containing a list of collections to be downloaded\",\n args: {\n name: \"requirements\",\n description: \"A file containing a list of collections to be downloaded\",\n template: [\"filepaths\"]\n }\n }, {\n name: [\"--keep-scm-meta\", \"-g\"],\n description: \"Use tar instead of the scm archive option when packaging the role\"\n }],\n f = {\n name: \"ansible-galaxy\",\n description: \"Perform various Role and Collection related operations\",\n subcommands: [{\n name: \"collection\",\n description: \"Operate on collections\",\n subcommands: [{\n name: \"download\",\n description: \"Download collections\",\n options: [...e, ...o],\n args: a\n }, {\n name: \"init\",\n description: \"Initialize collections\",\n options: [...e, ...t],\n args: {\n name: \"collection_name\",\n description: \"Name of the collection\"\n }\n }, {\n name: \"build\",\n description: \"Build collections\",\n options: [...e, ...i],\n args: {\n name: \"collection\",\n description: \"Path(s) to the collection to be built\",\n template: [\"folders\"],\n default: \".\"\n }\n }, {\n name: \"publish\",\n description: \"Publish collections\",\n options: [...e, ...n],\n args: {\n name: \"collection_path\",\n description: \"The path to the collection tarball to publish\",\n template: [\"folders\"],\n default: \".\"\n }\n }, {\n name: \"list\",\n description: \"List collections\",\n options: [...e, ...s],\n args: {\n name: \"collection\",\n description: \"The collections to list\",\n isOptional: !0\n }\n }, {\n name: \"verify\",\n description: \"Verify collections\",\n options: [...e, ...r],\n args: {\n name: \"collection_name\",\n description: \"The collections to verify\",\n isOptional: !0\n }\n }]\n }, {\n name: \"role\",\n description: \"Operate on roles\",\n subcommands: [{\n name: \"init\",\n description: \"Initialize roles\",\n options: [...e, ...l],\n args: {\n name: \"role_name\",\n description: \"Name of the role\"\n }\n }, {\n name: \"remove\",\n description: \"Remove roles\",\n options: [...e, ...c],\n args: {\n name: \"role_name\",\n description: \"The role to remove\"\n }\n }, {\n name: \"list\",\n description: \"List roles\",\n options: [...e, ...p],\n args: {\n name: \"role\",\n description: \"The 
role to list\",\n isOptional: !0\n }\n }, {\n name: \"search\",\n description: \"Search roles\",\n options: [...e, ...m],\n args: {\n name: \"searchterm\",\n description: \"Search terms\",\n isOptional: !0\n }\n }, {\n name: \"import\",\n description: \"Import roles\",\n options: [...e, ...d],\n args: [{\n name: \"github_user\",\n description: \"GitHub username\",\n isOptional: !0\n }, {\n name: \"github_repo\",\n description: \"GitHub repository\"\n }]\n }, {\n name: \"setup\",\n description: \"Set up roles\",\n options: [...e, ...h]\n }, {\n name: \"info\",\n description: \"Role information\",\n options: [...e, ...u]\n }, {\n name: \"install\",\n description: \"Install roles\",\n options: [...e, ...g]\n }]\n }],\n options: [{\n name: [\"--help\", \"-h\"],\n description: \"Show help and exit\",\n isPersistent: !0\n }, {\n name: \"--verbose\",\n description: \"Verbose mode (-vvv for more, -vvvv to enable connection debugging)\",\n exclusiveOn: [\"-v\"],\n isPersistent: !0\n }, {\n name: \"-v\",\n description: \"Verbose mode (-vvv for more, -vvvv to enable connection debugging)\",\n isRepeatable: 4,\n exclusiveOn: [\"--verbose\"],\n isPersistent: !0\n }, {\n name: \"--version\",\n description: \"Shows version number, config file location, module search path, module location, executable location and exit\",\n isPersistent: !0\n }]\n },\n b = f;\n\n\n//# sourceURL=webpack://SuggestionsBundle/./node_modules/@withfig/autocomplete/build/ansible-galaxy.js?"); + +/***/ }), + +/***/ "./node_modules/@withfig/autocomplete/build/ansible-playbook.js": +/*!**********************************************************************!*\ + !*** ./node_modules/@withfig/autocomplete/build/ansible-playbook.js ***! + \**********************************************************************/ +/***/ ((__unused_webpack___webpack_module__, __webpack_exports__, __webpack_require__) => { + +eval("__webpack_require__.r(__webpack_exports__);\n/* harmony export */ __webpack_require__.d(__webpack_exports__, {\n/* harmony export */ \"default\": () => (/* binding */ Ve)\n/* harmony export */ });\nvar Z = Object.create;\nvar q = Object.defineProperty;\nvar Y = Object.getOwnPropertyDescriptor;\nvar ee = Object.getOwnPropertyNames;\nvar te = Object.getPrototypeOf,\n ne = Object.prototype.hasOwnProperty;\nvar T = (e, t) => () => (t || e((t = {\n exports: {}\n}).exports, t), t.exports);\nvar se = (e, t, n, s) => {\n if (t && typeof t == \"object\" || typeof t == \"function\") {\n var _loop = function _loop(i) {\n !ne.call(e, i) && i !== n && q(e, i, {\n get: () => t[i],\n enumerable: !(s = Y(t, i)) || s.enumerable\n });\n };\n for (var i of ee(t)) {\n _loop(i);\n }\n }\n return e;\n};\nvar ie = (e, t, n) => (n = e != null ? Z(te(e)) : {}, se(t || !e || !e.__esModule ? q(n, \"default\", {\n value: e,\n enumerable: !0\n}) : n, e));\nvar N = T(k => {\n \"use strict\";\n\n Object.defineProperty(k, \"__esModule\", {\n value: !0\n });\n k.shellExpand = k.ensureTrailingSlash = void 0;\n var re = e => e.endsWith(\"/\") ? e : \"\".concat(e, \"/\");\n k.ensureTrailingSlash = re;\n var oe = (e, t) => e.startsWith(\"~\") && (e.length === 1 || e.charAt(1) === \"/\") ? e.replace(\"~\", t) : e,\n ae = (e, t) => e.replace(/\\$([A-Za-z0-9_]+)/g, i => {\n var r;\n var a = i.slice(1);\n return (r = t[a]) !== null && r !== void 0 ? r : i;\n }).replace(/\\$\\{([A-Za-z0-9_]+)(?::-([^}]+))?\\}/g, (i, r, a) => {\n var c, u;\n return (u = (c = t[r]) !== null && c !== void 0 ? c : a) !== null && u !== void 0 ? 
u : i;\n }),\n le = (e, t) => {\n var n;\n var {\n environmentVariables: s\n } = t;\n return ae(oe(e, (n = s === null || s === void 0 ? void 0 : s.HOME) !== null && n !== void 0 ? n : \"~\"), s);\n };\n k.shellExpand = le;\n});\nvar K = T(v => {\n \"use strict\";\n\n var ce = v && v.__awaiter || function (e, t, n, s) {\n function i(r) {\n return r instanceof n ? r : new n(function (a) {\n a(r);\n });\n }\n return new (n || (n = Promise))(function (r, a) {\n function c(o) {\n try {\n l(s.next(o));\n } catch (d) {\n a(d);\n }\n }\n function u(o) {\n try {\n l(s.throw(o));\n } catch (d) {\n a(d);\n }\n }\n function l(o) {\n o.done ? r(o.value) : i(o.value).then(c, u);\n }\n l((s = s.apply(e, t || [])).next());\n });\n };\n Object.defineProperty(v, \"__esModule\", {\n value: !0\n });\n v.filepaths = v.folders = v.getCurrentInsertedDirectory = v.sortFilesAlphabetically = void 0;\n var C = N();\n function W(e) {\n var t = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : [];\n var n = t.map(i => i.toLowerCase()),\n s = e.filter(i => !n.includes(i.toLowerCase()));\n return [...s.filter(i => !i.startsWith(\".\")).sort((i, r) => i.localeCompare(r)), ...s.filter(i => i.startsWith(\".\")).sort((i, r) => i.localeCompare(r)), \"../\"];\n }\n v.sortFilesAlphabetically = W;\n var ue = (e, t, n) => {\n if (e === null) return \"/\";\n var s = (0, C.shellExpand)(t, n),\n i = s.slice(0, s.lastIndexOf(\"/\") + 1);\n return i === \"\" ? (0, C.ensureTrailingSlash)(e) : i.startsWith(\"/\") ? i : \"\".concat((0, C.ensureTrailingSlash)(e)).concat(i);\n };\n v.getCurrentInsertedDirectory = ue;\n function E() {\n var e = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n var {\n extensions: t = [],\n equals: n = [],\n matches: s,\n filterFolders: i = !1,\n editFileSuggestions: r,\n editFolderSuggestions: a,\n rootDirectory: c,\n showFolders: u = \"always\"\n } = e,\n l = new Set(t),\n o = new Set(n),\n d = () => t.length > 0 || n.length > 0 || s,\n g = function g() {\n var p = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : [];\n return d() ? p.filter(_ref => {\n var {\n name: m = \"\",\n type: _\n } = _ref;\n if (!i && _ === \"folder\" || o.has(m) || s && m.match(s)) return !0;\n var [, ...f] = m.split(\".\");\n if (f.length >= 1) {\n var b = f.length - 1,\n O = f[b];\n do {\n if (l.has(O)) return !0;\n b -= 1, O = [f[b], O].join(\".\");\n } while (b >= 0);\n }\n return !1;\n }) : p;\n },\n S = function S() {\n var p = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : [];\n return !r && !a ? p : p.map(m => Object.assign(Object.assign({}, m), (m.type === \"file\" ? r : a) || {}));\n };\n return {\n trigger: (p, m) => {\n var _ = p.lastIndexOf(\"/\"),\n f = m.lastIndexOf(\"/\");\n return _ !== f ? !0 : _ === -1 && f === -1 ? !1 : p.slice(0, _) !== m.slice(0, f);\n },\n getQueryTerm: p => p.slice(p.lastIndexOf(\"/\") + 1),\n custom: (p, m, _) => ce(this, void 0, void 0, function* () {\n var f;\n var {\n isDangerous: b,\n currentWorkingDirectory: O,\n searchTerm: h\n } = _,\n x = (f = (0, v.getCurrentInsertedDirectory)(c !== null && c !== void 0 ? c : O, h, _)) !== null && f !== void 0 ? f : \"/\";\n try {\n var D = yield m({\n command: \"ls\",\n args: [\"-1ApL\"],\n cwd: x\n }),\n X = W(D.stdout.split(\"\\n\"), [\".DS_Store\"]),\n R = [];\n for (var A of X) if (A) {\n var V = A.endsWith(\"/\") ? 
\"folders\" : \"filepaths\";\n (V === \"filepaths\" && u !== \"only\" || V === \"folders\" && u !== \"never\") && R.push({\n type: V === \"filepaths\" ? \"file\" : \"folder\",\n name: A,\n insertValue: A,\n isDangerous: b,\n context: {\n templateType: V\n }\n });\n }\n return S(g(R));\n } catch (_unused) {\n return [];\n }\n })\n };\n }\n v.folders = Object.assign(() => E({\n showFolders: \"only\"\n }), Object.freeze(E({\n showFolders: \"only\"\n })));\n v.filepaths = Object.assign(E, Object.freeze(E()));\n});\nvar J = T(w => {\n \"use strict\";\n\n var j = w && w.__awaiter || function (e, t, n, s) {\n function i(r) {\n return r instanceof n ? r : new n(function (a) {\n a(r);\n });\n }\n return new (n || (n = Promise))(function (r, a) {\n function c(o) {\n try {\n l(s.next(o));\n } catch (d) {\n a(d);\n }\n }\n function u(o) {\n try {\n l(s.throw(o));\n } catch (d) {\n a(d);\n }\n }\n function l(o) {\n o.done ? r(o.value) : i(o.value).then(c, u);\n }\n l((s = s.apply(e, t || [])).next());\n });\n };\n Object.defineProperty(w, \"__esModule\", {\n value: !0\n });\n w.keyValueList = w.keyValue = w.valueList = void 0;\n var $ = new Map();\n function F(e, t) {\n return e.length === 0 ? t : t.map(n => n.insertValue ? n : Object.assign(Object.assign({}, n), {\n insertValue: n.name + e\n }));\n }\n function z(e, t, n) {\n return j(this, void 0, void 0, function* () {\n if (typeof e == \"function\") {\n var s = yield e(...n);\n return F(t, s);\n }\n if (typeof e[0] == \"string\") {\n var _s = e.map(i => ({\n name: i\n }));\n return F(t, _s);\n }\n return F(t, e);\n });\n }\n function P(e, t, n, s) {\n return j(this, void 0, void 0, function* () {\n if (n || Array.isArray(e)) {\n var i = $.get(e);\n return i === void 0 && (i = yield z(e, t, s), $.set(e, i)), i;\n }\n return z(e, t, s);\n });\n }\n function H(e, t) {\n return typeof t == \"string\" ? e && t === \"keys\" || !e && t === \"values\" : t;\n }\n function L(e) {\n for (var _len = arguments.length, t = new Array(_len > 1 ? _len - 1 : 0), _key = 1; _key < _len; _key++) {\n t[_key - 1] = arguments[_key];\n }\n return Math.max(...t.map(n => e.lastIndexOf(n)));\n }\n function M(e, t) {\n var n = new Set(e);\n return t.filter(s => {\n var i;\n return typeof s.name == \"string\" ? !n.has(s.name) : !(!((i = s.name) === null || i === void 0) && i.some(r => n.has(r)));\n });\n }\n function de(_ref2) {\n var _this = this;\n var {\n delimiter: e = \",\",\n values: t = [],\n cache: n = !1,\n insertDelimiter: s = !1,\n allowRepeatedValues: i = !1\n } = _ref2;\n return {\n trigger: (r, a) => r.lastIndexOf(e) !== a.lastIndexOf(e),\n getQueryTerm: r => r.slice(r.lastIndexOf(e) + e.length),\n custom: function custom() {\n for (var _len2 = arguments.length, r = new Array(_len2), _key2 = 0; _key2 < _len2; _key2++) {\n r[_key2] = arguments[_key2];\n }\n return j(_this, void 0, void 0, function* () {\n var a;\n var c = yield P(t, s ? e : \"\", n, r);\n if (i) return c;\n var [u] = r,\n l = (a = u[u.length - 1]) === null || a === void 0 ? 
void 0 : a.split(e);\n return M(l, c);\n });\n }\n };\n }\n w.valueList = de;\n function fe(_ref3) {\n var _this2 = this;\n var {\n separator: e = \"=\",\n keys: t = [],\n values: n = [],\n cache: s = !1,\n insertSeparator: i = !0\n } = _ref3;\n return {\n trigger: (r, a) => r.indexOf(e) !== a.indexOf(e),\n getQueryTerm: r => r.slice(r.indexOf(e) + 1),\n custom: function custom() {\n for (var _len3 = arguments.length, r = new Array(_len3), _key3 = 0; _key3 < _len3; _key3++) {\n r[_key3] = arguments[_key3];\n }\n return j(_this2, void 0, void 0, function* () {\n var [a] = r,\n u = !a[a.length - 1].includes(e),\n l = u ? t : n,\n o = H(u, s);\n return P(l, u && i ? e : \"\", o, r);\n });\n }\n };\n }\n w.keyValue = fe;\n function pe(_ref4) {\n var _this3 = this;\n var {\n separator: e = \"=\",\n delimiter: t = \",\",\n keys: n = [],\n values: s = [],\n cache: i = !1,\n insertSeparator: r = !0,\n insertDelimiter: a = !1,\n allowRepeatedKeys: c = !1,\n allowRepeatedValues: u = !0\n } = _ref4;\n return {\n trigger: (l, o) => {\n var d = L(l, e, t),\n g = L(o, e, t);\n return d !== g;\n },\n getQueryTerm: l => {\n var o = L(l, e, t);\n return l.slice(o + 1);\n },\n custom: function custom() {\n for (var _len4 = arguments.length, l = new Array(_len4), _key4 = 0; _key4 < _len4; _key4++) {\n l[_key4] = arguments[_key4];\n }\n return j(_this3, void 0, void 0, function* () {\n var [o] = l,\n d = o[o.length - 1],\n g = L(d, e, t),\n S = g === -1 || d.slice(g, g + e.length) !== e,\n p = S ? n : s,\n m = H(S, i),\n f = yield P(p, S ? r ? e : \"\" : a ? t : \"\", m, l);\n if (S) {\n if (c) return f;\n var O = d.split(t).map(h => h.slice(0, h.indexOf(e)));\n return M(O, f);\n }\n if (u) return f;\n var b = d.split(t).map(O => O.slice(O.indexOf(e) + e.length));\n return M(b, f);\n });\n }\n };\n }\n w.keyValueList = pe;\n});\nvar Q = T(I => {\n \"use strict\";\n\n var he = I && I.__awaiter || function (e, t, n, s) {\n function i(r) {\n return r instanceof n ? r : new n(function (a) {\n a(r);\n });\n }\n return new (n || (n = Promise))(function (r, a) {\n function c(o) {\n try {\n l(s.next(o));\n } catch (d) {\n a(d);\n }\n }\n function u(o) {\n try {\n l(s.throw(o));\n } catch (d) {\n a(d);\n }\n }\n function l(o) {\n o.done ? r(o.value) : i(o.value).then(c, u);\n }\n l((s = s.apply(e, t || [])).next());\n });\n };\n Object.defineProperty(I, \"__esModule\", {\n value: !0\n });\n I.ai = void 0;\n var me = 4097,\n ge = 4,\n ve = .8,\n ye = me * ge * ve;\n function be(_ref5) {\n var {\n name: e,\n prompt: t,\n message: n,\n postProcess: s,\n temperature: i,\n splitOn: r\n } = _ref5;\n return {\n scriptTimeout: 15e3,\n custom: (a, c, u) => he(this, void 0, void 0, function* () {\n var l, o;\n var d = yield c({\n command: \"fig\",\n args: [\"settings\", \"--format\", \"json\", \"autocomplete.ai.enabled\"]\n });\n if (!JSON.parse(d.stdout)) return [];\n var g = typeof t == \"function\" ? yield t({\n tokens: a,\n executeCommand: c,\n generatorContext: u\n }) : t,\n S = typeof n == \"function\" ? yield n({\n tokens: a,\n executeCommand: c,\n generatorContext: u\n }) : n;\n if (S === null || S.length === 0) return console.warn(\"No message provided to AI generator\"), [];\n var p = ye - ((l = g === null || g === void 0 ? void 0 : g.length) !== null && l !== void 0 ? l : 0),\n m = {\n model: \"gpt-3.5-turbo\",\n source: \"autocomplete\",\n name: e,\n messages: [...(g ? 
[{\n role: \"system\",\n content: g\n }] : []), {\n role: \"user\",\n content: S.slice(0, p)\n }],\n temperature: i\n },\n _ = JSON.stringify(m),\n f = yield c({\n command: \"fig\",\n args: [\"_\", \"request\", \"--route\", \"/ai/chat\", \"--method\", \"POST\", \"--body\", _]\n }),\n b = JSON.parse(f.stdout);\n return (o = b === null || b === void 0 ? void 0 : b.choices.map(h => {\n var x;\n return (x = h === null || h === void 0 ? void 0 : h.message) === null || x === void 0 ? void 0 : x.content;\n }).filter(h => typeof h == \"string\").flatMap(h => r ? h.split(r).filter(x => x.trim().length > 0) : [h]).map(h => {\n if (s) return s(h);\n var x = h.trim().replace(/\\n/g, \" \");\n return {\n icon: \"\\u{1FA84}\",\n name: x,\n insertValue: \"'\".concat(x, \"'\"),\n description: \"Generated by Fig AI\"\n };\n })) !== null && o !== void 0 ? o : [];\n })\n };\n }\n I.ai = be;\n});\nvar G = T(y => {\n \"use strict\";\n\n var _e = y && y.__createBinding || (Object.create ? function (e, t, n, s) {\n s === void 0 && (s = n);\n var i = Object.getOwnPropertyDescriptor(t, n);\n (!i || (\"get\" in i ? !t.__esModule : i.writable || i.configurable)) && (i = {\n enumerable: !0,\n get: function get() {\n return t[n];\n }\n }), Object.defineProperty(e, s, i);\n } : function (e, t, n, s) {\n s === void 0 && (s = n), e[s] = t[n];\n }),\n Oe = y && y.__exportStar || function (e, t) {\n for (var n in e) n !== \"default\" && !Object.prototype.hasOwnProperty.call(t, n) && _e(t, e, n);\n };\n Object.defineProperty(y, \"__esModule\", {\n value: !0\n });\n y.ai = y.folders = y.filepaths = void 0;\n var B = K();\n Object.defineProperty(y, \"filepaths\", {\n enumerable: !0,\n get: function get() {\n return B.filepaths;\n }\n });\n Object.defineProperty(y, \"folders\", {\n enumerable: !0,\n get: function get() {\n return B.folders;\n }\n });\n Oe(J(), y);\n var Se = Q();\n Object.defineProperty(y, \"ai\", {\n enumerable: !0,\n get: function get() {\n return Se.ai;\n }\n });\n});\nvar U = ie(G(), 1),\n xe = {\n name: \"ansible-playbook\",\n description: \"Runs Ansible playbooks, executing the defined tasks on the targeted hosts\",\n args: {\n name: \"playbook\",\n description: \"Playbook(s)\",\n isVariadic: !0,\n generators: (0, U.filepaths)({\n extensions: [\"yml\", \"yaml\"]\n })\n },\n options: [{\n name: \"--ask-vault-pass\",\n description: \"Ask for vault password\"\n }, {\n name: \"--flush-cache\",\n description: \"Clears the fact cache for every host in inventory\"\n }, {\n name: \"--force-handlers\",\n description: \"Run handlers even if a task fails\"\n }, {\n name: \"--list-hosts\",\n description: \"Outputs a list of matching hosts; does not execute\"\n }, {\n name: \"--list-tags\",\n description: \"List all available tags\"\n }, {\n name: \"--list-tasks\",\n description: \"List all tasks that would be executed\"\n }, {\n name: \"--skip-tags\",\n description: \"Only run plays and tasks whose tags do not match these values\",\n args: {\n name: \"tags\"\n }\n }, {\n name: \"--start-at-task\",\n description: \"Start the playbook at the task matching this name one-step-at-a-time\",\n args: {\n name: \"task name\"\n }\n }, {\n name: \"--step\",\n description: \"Execute one-step-at-a-time\"\n }, {\n name: \"--syntax-check\",\n description: \"Perform a syntax check on the playbook, but do not execute it\"\n }, {\n name: \"--vault-id\",\n description: \"Specify the vault identity to use\",\n args: {\n name: \"vault ID\"\n }\n }, {\n name: \"--vault-password-file\",\n description: \"Specify a vault password file\",\n 
args: {\n name: \"vault password file\",\n template: [\"filepaths\"]\n }\n }, {\n name: \"--version\",\n description: \"Show program's version number, config file location, configured module search path, module location and executable location\"\n }, {\n name: [\"--check\", \"-C\"],\n description: \"Don't make any changes; instead, try to predict some of the changes that may occur\"\n }, {\n name: [\"--diff\", \"-D\"],\n description: \"When changing (small) files and templates, show the differences in those files\"\n }, {\n name: [\"--module-path\", \"-M\"],\n description: \"Prepend colon-separated path(s) to module library\",\n args: {\n name: \"module path\",\n template: [\"folders\"]\n }\n }, {\n name: [\"--extra-vars\", \"-e\"],\n description: \"Set additional variables as key=value or YAML/JSON, if filename prepend with @\",\n args: {\n name: \"extra vars\"\n }\n }, {\n name: [\"--forks\", \"-f\"],\n description: \"Specify number of parallel processes to use\",\n args: {\n name: \"forks\"\n }\n }, {\n name: [\"--help\", \"-h\"],\n description: \"Show help for ansible\"\n }, {\n name: [\"--inventory\", \"-i\"],\n description: \"Specify inventory host path or comma separated host list\",\n args: {\n name: \"inventory\"\n }\n }, {\n name: [\"--limit\", \"-l\"],\n description: \"Limit selected hosts to an additional pattern\",\n args: {\n name: \"subset\"\n }\n }, {\n name: [\"--tags\", \"-t\"],\n description: \"Only run plays and tasks tagged with these values\",\n args: {\n name: \"tags\"\n }\n }, {\n name: [\"--verbose\", \"-v\"],\n description: \"Enable verbose mode\"\n }, {\n name: \"-vvv\",\n description: \"Enable very verbose mode\"\n }, {\n name: \"-vvvv\",\n description: \"Enable connection debug mode\"\n }, {\n name: \"--become-method\",\n description: \"Privilege escalation method to use\",\n args: {\n name: \"become method\",\n suggestions: [\"sudo\", \"su\", \"pbrun\", \"pfexec\", \"doas\", \"dzdo\", \"ksu\", \"runas\", \"machinectl\"]\n }\n }, {\n name: \"--become-user\",\n description: \"Privilege escalation user to use\",\n args: {\n name: \"become user\"\n }\n }, {\n name: [\"--ask-become-pass\", \"-K\"],\n description: \"Prompt for privilege escalation password\"\n }, {\n name: [\"--become\", \"-b\"],\n description: \"Run operations with become\"\n }, {\n name: [\"--private-key\", \"--key-file\"],\n description: \"Use this fole to authenticate the connection\",\n args: {\n name: \"private key\",\n template: [\"filepaths\"]\n }\n }, {\n name: \"--scp-extra-args\",\n description: \"Extra arguments to pass to (only) scp\",\n args: {\n name: \"SCP extra args\"\n }\n }, {\n name: \"--sftp-extra-args\",\n description: \"Extra arguments to pass to (only) sftp\",\n args: {\n name: \"SFTP extra args\"\n }\n }, {\n name: \"-ssh-extra-args\",\n description: \"Extra arguments to pass to (only) ssh\",\n args: {\n name: \"SSH extra args\"\n }\n }, {\n name: \"--ssh-common-args\",\n description: \"Extra arguments to pass to sftp/scp/ssh\",\n args: {\n name: \"SSH common args\"\n }\n }, {\n name: [\"--timeout\", \"-T\"],\n description: \"Override the connection timeout in seconds\",\n args: {\n name: \"timeout\"\n }\n }, {\n name: [\"--connection\", \"-c\"],\n description: \"Connection type to use\",\n args: {\n name: \"connection type\"\n }\n }, {\n name: [\"--ask-pass\", \"-k\"],\n description: \"Ask for connection password\"\n }, {\n name: [\"--user\", \"-u\"],\n description: \"Connect as this user\",\n args: {\n name: \"user\"\n }\n }]\n },\n Ve = xe;\n\n\n//# 
sourceURL=webpack://SuggestionsBundle/./node_modules/@withfig/autocomplete/build/ansible-playbook.js?");
+
+/***/ }),
+
+/***/ "./node_modules/@withfig/autocomplete/build/ansible.js":
+/*!*************************************************************!*\
+ !*** ./node_modules/@withfig/autocomplete/build/ansible.js ***!
+ \*************************************************************/
+/***/ ((__unused_webpack___webpack_module__, __webpack_exports__, __webpack_require__) => {
+
+eval("__webpack_require__.r(__webpack_exports__);\n/* harmony export */ __webpack_require__.d(__webpack_exports__, {\n/* harmony export */ \"default\": () => (/* binding */ a)\n/* harmony export */ });\nvar e = {\n name: \"ansible\",\n description: \"Define and run a single Ansible task 'playbook' against a set of hosts\",\n args: {\n name: \"pattern\",\n description: \"Host pattern\"\n },\n options: [{\n name: \"--ask-vault-pass\",\n description: \"Ask for vault password\"\n }, {\n name: \"--list-hosts\",\n description: \"Outputs a list of matching hosts; does not execute\"\n }, {\n name: \"--playbook-dir\",\n description: \"Since this tool does not use playbooks, use this as a substitute playbook directory. This sets the relative path for many features including roles/ group_vars/etc\",\n args: {\n name: \"playbook dir\",\n template: [\"folders\"]\n }\n }, {\n name: \"--syntax-check\",\n description: \"Perform a syntax check on the playbook, but do not execute it\"\n }, {\n name: \"--vault-id\",\n description: \"Specify the vault identity to use\",\n args: {\n name: \"vault ID\"\n }\n }, {\n name: \"--vault-password-file\",\n description: \"Specify a vault password file\",\n args: {\n name: \"vault password file\",\n template: [\"filepaths\"]\n }\n }, {\n name: \"--version\",\n description: \"Show program's version number, config file location, configured module search path, module location and executable location\"\n }, {\n name: [\"--background\", \"-B\"],\n description: \"Run asynchronously, failing after specified seconds\",\n args: {\n name: \"seconds\"\n }\n }, {\n name: [\"--check\", \"-C\"],\n description: \"Don't make any changes; instead, try to predict some of the changes that may occur\"\n }, {\n name: [\"--diff\", \"-D\"],\n description: \"When changing (small) files and templates, show the differences in those files\"\n }, {\n name: [\"--module-path\", \"-M\"],\n description: \"Prepend colon-separated path(s) to module library\",\n args: {\n name: \"module path\",\n template: [\"folders\"]\n }\n }, {\n name: [\"--poll\", \"-P\"],\n description: \"Set the poll interval if using -B\",\n args: {\n name: \"poll interval\"\n }\n }, {\n name: [\"--args\", \"-a\"],\n description: \"Specify module arguments\",\n args: {\n name: \"module arguments\"\n }\n }, {\n name: [\"--extra-vars\", \"-e\"],\n description: \"Set additional variables as key=value or YAML/JSON, if filename prepend with @\",\n args: {\n name: \"extra vars\"\n }\n }, {\n name: [\"--forks\", \"-f\"],\n description: \"Specify number of parallel processes to use\",\n args: {\n name: \"forks\"\n }\n }, {\n name: [\"--help\", \"-h\"],\n description: \"Show help for ansible\"\n }, {\n name: [\"--inventory\", \"-i\"],\n description: \"Specify inventory host path or comma separated host list\",\n args: {\n name: \"inventory\"\n }\n }, {\n name: [\"--limit\", \"-l\"],\n description: \"Limit selected hosts to an additional pattern\",\n args: {\n name: \"subset\"\n }\n }, {\n name: [\"--module-name\", \"-m\"],\n description: \"Specify the module name to 
execute\",\n args: {\n name: \"module name\"\n }\n }, {\n name: [\"--one-line\", \"-o\"],\n description: \"Condense output\"\n }, {\n name: [\"--tree\", \"-t\"],\n description: \"Log output to specific directory\",\n args: {\n name: \"tree\"\n }\n }, {\n name: [\"--verbose\", \"-v\"],\n description: \"Enable verbose mode\"\n }, {\n name: \"-vvv\",\n description: \"Enable very verbose mode\"\n }, {\n name: \"-vvvv\",\n description: \"Enable connection debug mode\"\n }, {\n name: \"--become-method\",\n description: \"Privilege escalation method to use\",\n args: {\n name: \"become method\",\n suggestions: [\"sudo\", \"su\", \"pbrun\", \"pfexec\", \"doas\", \"dzdo\", \"ksu\", \"runas\", \"machinectl\"]\n }\n }, {\n name: \"--become-user\",\n description: \"Privilege escalation user to use\",\n args: {\n name: \"become user\"\n }\n }, {\n name: [\"--ask-become-pass\", \"-K\"],\n description: \"Prompt for privilege escalation password\"\n }, {\n name: [\"--become\", \"-b\"],\n description: \"Run operations with become\"\n }, {\n name: [\"--private-key\", \"--key-file\"],\n description: \"Use this fole to authenticate the connection\",\n args: {\n name: \"private key\",\n template: [\"filepaths\"]\n }\n }, {\n name: \"--scp-extra-args\",\n description: \"Extra arguments to pass to (only) scp\",\n args: {\n name: \"SCP extra args\"\n }\n }, {\n name: \"--sftp-extra-args\",\n description: \"Extra arguments to pass to (only) sftp\",\n args: {\n name: \"SFTP extra args\"\n }\n }, {\n name: \"-ssh-extra-args\",\n description: \"Extra arguments to pass to (only) ssh\",\n args: {\n name: \"SSH extra args\"\n }\n }, {\n name: \"--ssh-common-args\",\n description: \"Extra arguments to pass to sftp/scp/ssh\",\n args: {\n name: \"SSH common args\"\n }\n }, {\n name: [\"--timeout\", \"-T\"],\n description: \"Override the connection timeout in seconds\",\n args: {\n name: \"timeout\"\n }\n }, {\n name: [\"--connection\", \"-c\"],\n description: \"Connection type to use\",\n args: {\n name: \"connection type\"\n }\n }, {\n name: [\"--ask-pass\", \"-k\"],\n description: \"Ask for connection password\"\n }, {\n name: [\"--user\", \"-u\"],\n description: \"Connect as this user\",\n args: {\n name: \"user\"\n }\n }]\n },\n a = e;\n\n\n//# sourceURL=webpack://SuggestionsBundle/./node_modules/@withfig/autocomplete/build/ansible.js?"); + +/***/ }), + +/***/ "./node_modules/@withfig/autocomplete/build/aws.js": +/*!*********************************************************!*\ + !*** ./node_modules/@withfig/autocomplete/build/aws.js ***! 
+ \*********************************************************/
+/***/ ((__unused_webpack___webpack_module__, __webpack_exports__, __webpack_require__) => {
+
+eval("__webpack_require__.r(__webpack_exports__);\n/* harmony export */ __webpack_require__.d(__webpack_exports__, {\n/* harmony export */ awsProfileGenerator: () => (/* binding */ t),\n/* harmony export */ \"default\": () => (/* binding */ s)\n/* harmony export */ });\nfunction asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } }\nfunction _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, \"next\", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, \"throw\", err); } _next(undefined); }); }; }\nvar t = {\n cache: {\n strategy: \"stale-while-revalidate\",\n cacheByDirectory: !0\n },\n script: [\"aws\", \"configure\", \"list-profiles\"],\n postProcess: function postProcess(e) {\n return e.trim() == \"\" ? [] : e.split(\"\\n\").map(a => ({\n name: a,\n icon: \"\\u{1F464}\"\n }));\n }\n },\n n = {\n name: \"aws\",\n generateSpec(e, a) {\n return _asyncToGenerator(function* () {\n var {\n stdout: o\n } = yield a({\n command: \"bash\",\n args: [\"-c\", \"ls ~/.aws/credentials && ls ~/.aws/config\"]\n });\n return {\n name: \"aws\",\n subcommands: [{\n name: \"configure\",\n description: \"Configure AWS CLI options. If this command is run with no\\narguments, you will be prompted for configuration values such as your AWS\\nAccess Key Id and your AWS Secret Access Key. You can configure a named\\nprofile using the ``--profile`` argument. If your config file does not exist\\n(the default location is ``~/.aws/config``), the AWS CLI will create it\\nfor you. To keep an existing value, hit enter when prompted for the value.\\nWhen you are prompted for information, the current value will be displayed in\\n``[brackets]``. If the config item has no value, it will be displayed as\\n``[None]``. Note that the ``configure`` command only works with values from the\\nconfig file. It does not use any configuration values from environment\\nvariables or the IAM role.\\n\\nNote: the values you provide for the AWS Access Key ID and the AWS Secret\\nAccess Key will be written to the shared credentials file\\n(``~/.aws/credentials``).\\n\\n\\n=======================\\nConfiguration Variables\\n=======================\\n\\nThe following configuration variables are supported in the config file:\\n\\n* **aws_access_key_id** - The AWS access key part of your credentials\\n* **aws_secret_access_key** - The AWS secret access key part of your credentials\\n* **aws_session_token** - The session token part of your credentials (session tokens only)\\n* **metadata_service_timeout** - The number of seconds to wait until the metadata service\\n request times out. This is used if you are using an IAM role to provide\\n your credentials.\\n* **metadata_service_num_attempts** - The number of attempts to try to retrieve\\n credentials. If you know for certain you will be using an IAM role on an\\n Amazon EC2 instance, you can set this value to ensure any intermittent\\n failures are retried. 
By default this value is 1.\\n\\nFor more information on configuration options, see `Configuring the AWS Command Line Interface`_ in the *AWS CLI User Guide*.\\n\\n.. _`Configuring the AWS Command Line Interface`: http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html\\n\",\n loadSpec: \"aws/configure\",\n icon: \"fig://icon?type=alert\",\n priority: o.includes(\"No such file or directory\") ? 100 : 50\n }]\n };\n })();\n },\n options: [{\n name: \"--profile\",\n description: \"Use a specific profile from your credential file\",\n args: {\n generators: t,\n filterStrategy: \"fuzzy\"\n }\n }],\n subcommands: [{\n name: \"accessanalyzer\",\n description: \"AWS IAM Access Analyzer helps identify potential resource-access risks by enabling you to identify any policies that grant access to an external principal. It does this by using logic-based reasoning to analyze resource-based policies in your AWS environment. An external principal can be another AWS account, a root user, an IAM user or role, a federated user, an AWS service, or an anonymous user. You can also use Access Analyzer to preview and validate public and cross-account access to your resources before deploying permissions changes. This guide describes the AWS IAM Access Analyzer operations that you can call programmatically. For general information about Access Analyzer, see AWS IAM Access Analyzer in the IAM User Guide. To start using Access Analyzer, you first need to create an analyzer\",\n loadSpec: \"aws/accessanalyzer\"\n }, {\n name: \"acm\",\n description: \"AWS Certificate Manager You can use AWS Certificate Manager (ACM) to manage SSL/TLS certificates for your AWS-based websites and applications. For more information about using ACM, see the AWS Certificate Manager User Guide\",\n loadSpec: \"aws/acm\"\n }, {\n name: \"acm-pca\",\n description: \"This is the ACM Private CA API Reference. It provides descriptions, syntax, and usage examples for each of the actions and data types involved in creating and managing private certificate authorities (CA) for your organization. The documentation for each action shows the Query API request parameters and the XML response. Alternatively, you can use one of the AWS SDKs to access an API that's tailored to the programming language or platform that you're using. For more information, see AWS SDKs. Each ACM Private CA API action has a quota that determines the number of times the action can be called per second. For more information, see API Rate Quotas in ACM Private CA in the ACM Private CA user guide\",\n loadSpec: \"aws/acm-pca\"\n }, {\n name: \"alexaforbusiness\",\n description: \"Alexa for Business helps you use Alexa in your organization. Alexa for Business provides you with the tools to manage Alexa devices, enroll your users, and assign skills, at scale. You can build your own context-aware voice skills using the Alexa Skills Kit and the Alexa for Business API operations. You can also make these available as private skills for your organization. Alexa for Business makes it efficient to voice-enable your products and services, thus providing context-aware voice experiences for your customers. 
Device makers building with the Alexa Voice Service (AVS) can create fully integrated solutions, register their products with Alexa for Business, and manage them as shared devices in their organization\",\n loadSpec: \"aws/alexaforbusiness\"\n }, {\n name: \"amp\",\n description: \"Amazon Managed Service for Prometheus\",\n loadSpec: \"aws/amp\"\n }, {\n name: \"amplify\",\n description: \"Amplify enables developers to develop and deploy cloud-powered mobile and web apps. The Amplify Console provides a continuous delivery and hosting service for web applications. For more information, see the Amplify Console User Guide. The Amplify Framework is a comprehensive set of SDKs, libraries, tools, and documentation for client app development. For more information, see the Amplify Framework\",\n loadSpec: \"aws/amplify\"\n }, {\n name: \"amplifybackend\",\n description: \"AWS Amplify Admin API\",\n loadSpec: \"aws/amplifybackend\"\n }, {\n name: \"apigateway\",\n description: \"Amazon API Gateway Amazon API Gateway helps developers deliver robust, secure, and scalable mobile and web application back ends. API Gateway allows developers to securely connect mobile and web applications to APIs that run on AWS Lambda, Amazon EC2, or other publicly addressable web services that are hosted outside of AWS\",\n loadSpec: \"aws/apigateway\"\n }, {\n name: \"apigatewaymanagementapi\",\n description: \"The Amazon API Gateway Management API allows you to directly manage runtime aspects of your deployed APIs. To use it, you must explicitly set the SDK's endpoint to point to the endpoint of your deployed API. The endpoint will be of the form https://{api-id}.execute-api.{region}.amazonaws.com/{stage}, or will be the endpoint corresponding to your API's custom domain and base path, if applicable\",\n loadSpec: \"aws/apigatewaymanagementapi\"\n }, {\n name: \"apigatewayv2\",\n description: \"Amazon API Gateway V2\",\n loadSpec: \"aws/apigatewayv2\"\n }, {\n name: \"appconfig\",\n description: \"AWS AppConfig Use AWS AppConfig, a capability of AWS Systems Manager, to create, manage, and quickly deploy application configurations. AppConfig supports controlled deployments to applications of any size and includes built-in validation checks and monitoring. You can use AppConfig with applications hosted on Amazon EC2 instances, AWS Lambda, containers, mobile applications, or IoT devices. To prevent errors when deploying application configurations, especially for production systems where a simple typo could cause an unexpected outage, AppConfig includes validators. A validator provides a syntactic or semantic check to ensure that the configuration you want to deploy works as intended. To validate your application configuration data, you provide a schema or a Lambda function that runs against the configuration. The configuration deployment or update can only proceed when the configuration data is valid. During a configuration deployment, AppConfig monitors the application to ensure that the deployment is successful. If the system encounters an error, AppConfig rolls back the change to minimize impact for your application users. You can configure a deployment strategy for each application or environment that includes deployment criteria, including velocity, bake time, and alarms to monitor. Similar to error monitoring, if a deployment triggers an alarm, AppConfig automatically rolls back to the previous version. AppConfig supports multiple use cases. Here are some examples. 
Application tuning: Use AppConfig to carefully introduce changes to your application that can only be tested with production traffic. Feature toggle: Use AppConfig to turn on new features that require a timely deployment, such as a product launch or announcement. Allow list: Use AppConfig to allow premium subscribers to access paid content. Operational issues: Use AppConfig to reduce stress on your application when a dependency or other external factor impacts the system. This reference is intended to be used with the AWS AppConfig User Guide\",\n loadSpec: \"aws/appconfig\"\n }, {\n name: \"appflow\",\n description: \"Welcome to the Amazon AppFlow API reference. This guide is for developers who need detailed information about the Amazon AppFlow API operations, data types, and errors. Amazon AppFlow is a fully managed integration service that enables you to securely transfer data between software as a service (SaaS) applications like Salesforce, Marketo, Slack, and ServiceNow, and AWS services like Amazon S3 and Amazon Redshift. Use the following links to get started on the Amazon AppFlow API: Actions: An alphabetical list of all Amazon AppFlow API operations. Data types: An alphabetical list of all Amazon AppFlow data types. Common parameters: Parameters that all Query operations can use. Common errors: Client and server errors that all operations can return. If you're new to Amazon AppFlow, we recommend that you review the Amazon AppFlow User Guide. Amazon AppFlow API users can use vendor-specific mechanisms for OAuth, and include applicable OAuth attributes (such as auth-code and redirecturi) with the connector-specific ConnectorProfileProperties when creating a new connector profile using Amazon AppFlow API operations. For example, Salesforce users can refer to the Authorize Apps with OAuth documentation\",\n loadSpec: \"aws/appflow\"\n }, {\n name: \"appintegrations\",\n description: \"The Amazon AppIntegrations APIs are in preview release and are subject to change. The Amazon AppIntegrations service enables you to configure and reuse connections to external applications. For information about how you can use external applications with Amazon Connect, see Set up pre-built integrations in the Amazon Connect Administrator Guide\",\n loadSpec: \"aws/appintegrations\"\n }, {\n name: \"application-autoscaling\",\n description: \"With Application Auto Scaling, you can configure automatic scaling for the following resources: Amazon ECS services Amazon EC2 Spot Fleet requests Amazon EMR clusters Amazon AppStream 2.0 fleets Amazon DynamoDB tables and global secondary indexes throughput capacity Amazon Aurora Replicas Amazon SageMaker endpoint variants Custom resources provided by your own applications or services Amazon Comprehend document classification and entity recognizer endpoints AWS Lambda function provisioned concurrency Amazon Keyspaces (for Apache Cassandra) tables Amazon Managed Streaming for Apache Kafka broker storage API Summary The Application Auto Scaling service API includes three key sets of actions: Register and manage scalable targets - Register AWS or custom resources as scalable targets (a resource that Application Auto Scaling can scale), set minimum and maximum capacity limits, and retrieve information on existing scalable targets. Configure and manage automatic scaling - Define scaling policies to dynamically scale your resources in response to CloudWatch alarms, schedule one-time or recurring scaling actions, and retrieve your recent scaling activity history. 
Suspend and resume scaling - Temporarily suspend and later resume automatic scaling by calling the RegisterScalableTarget API action for any Application Auto Scaling scalable target. You can suspend and resume (individually or in combination) scale-out activities that are triggered by a scaling policy, scale-in activities that are triggered by a scaling policy, and scheduled scaling. To learn more about Application Auto Scaling, including information about granting IAM users required permissions for Application Auto Scaling actions, see the Application Auto Scaling User Guide\",\n loadSpec: \"aws/application-autoscaling\"\n }, {\n name: \"application-insights\",\n description: \"Amazon CloudWatch Application Insights Amazon CloudWatch Application Insights is a service that helps you detect common problems with your applications. It enables you to pinpoint the source of issues in your applications (built with technologies such as Microsoft IIS, .NET, and Microsoft SQL Server), by providing key insights into detected problems. After you onboard your application, CloudWatch Application Insights identifies, recommends, and sets up metrics and logs. It continuously analyzes and correlates your metrics and logs for unusual behavior to surface actionable problems with your application. For example, if your application is slow and unresponsive and leading to HTTP 500 errors in your Application Load Balancer (ALB), Application Insights informs you that a memory pressure problem with your SQL Server database is occurring. It bases this analysis on impactful metrics and log errors\",\n loadSpec: \"aws/application-insights\"\n }, {\n name: \"appmesh\",\n description: \"AWS App Mesh is a service mesh based on the Envoy proxy that makes it easy to monitor and control microservices. App Mesh standardizes how your microservices communicate, giving you end-to-end visibility and helping to ensure high availability for your applications. App Mesh gives you consistent visibility and network traffic controls for every microservice in an application. You can use App Mesh with AWS Fargate, Amazon ECS, Amazon EKS, Kubernetes on AWS, and Amazon EC2. App Mesh supports microservice applications that use service discovery naming for their components. For more information about service discovery on Amazon ECS, see Service Discovery in the Amazon Elastic Container Service Developer Guide. Kubernetes kube-dns and coredns are supported. For more information, see DNS for Services and Pods in the Kubernetes documentation\",\n loadSpec: \"aws/appmesh\"\n }, {\n name: \"appstream\",\n description: \"Amazon AppStream 2.0 This is the Amazon AppStream 2.0 API Reference. This documentation provides descriptions and syntax for each of the actions and data types in AppStream 2.0. AppStream 2.0 is a fully managed, secure application streaming service that lets you stream desktop applications to users without rewriting applications. AppStream 2.0 manages the AWS resources that are required to host and run your applications, scales automatically, and provides access to your users on demand. You can call the AppStream 2.0 API operations by using an interface VPC endpoint (interface endpoint). For more information, see Access AppStream 2.0 API Operations and CLI Commands Through an Interface VPC Endpoint in the Amazon AppStream 2.0 Administration Guide. 
To learn more about AppStream 2.0, see the following resources: Amazon AppStream 2.0 product page Amazon AppStream 2.0 documentation\",\n loadSpec: \"aws/appstream\"\n }, {\n name: \"appsync\",\n description: \"AWS AppSync provides API actions for creating and interacting with data sources using GraphQL from your application\",\n loadSpec: \"aws/appsync\"\n }, {\n name: \"athena\",\n description: \"Amazon Athena is an interactive query service that lets you use standard SQL to analyze data directly in Amazon S3. You can point Athena at your data in Amazon S3 and run ad-hoc queries and get results in seconds. Athena is serverless, so there is no infrastructure to set up or manage. You pay only for the queries you run. Athena scales automatically\\u2014executing queries in parallel\\u2014so results are fast, even with large datasets and complex queries. For more information, see What is Amazon Athena in the Amazon Athena User Guide. If you connect to Athena using the JDBC driver, use version 1.1.0 of the driver or later with the Amazon Athena API. Earlier version drivers do not support the API. For more information and to download the driver, see Accessing Amazon Athena with JDBC. For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide\",\n loadSpec: \"aws/athena\"\n }, {\n name: \"auditmanager\",\n description: \"Welcome to the AWS Audit Manager API reference. This guide is for developers who need detailed information about the AWS Audit Manager API operations, data types, and errors. AWS Audit Manager is a service that provides automated evidence collection so that you can continuously audit your AWS usage, and assess the effectiveness of your controls to better manage risk and simplify compliance. AWS Audit Manager provides pre-built frameworks that structure and automate assessments for a given compliance standard. Frameworks include a pre-built collection of controls with descriptions and testing procedures, which are grouped according to the requirements of the specified compliance standard or regulation. You can also customize frameworks and controls to support internal audits with unique requirements. Use the following links to get started with the AWS Audit Manager API: Actions: An alphabetical list of all AWS Audit Manager API operations. Data types: An alphabetical list of all AWS Audit Manager data types. Common parameters: Parameters that all Query operations can use. Common errors: Client and server errors that all operations can return. If you're new to AWS Audit Manager, we recommend that you review the AWS Audit Manager User Guide\",\n loadSpec: \"aws/auditmanager\"\n }, {\n name: \"autoscaling\",\n description: \"Amazon EC2 Auto Scaling Amazon EC2 Auto Scaling is designed to automatically launch or terminate EC2 instances based on user-defined scaling policies, scheduled actions, and health checks. Use this service with AWS Auto Scaling, Amazon CloudWatch, and Elastic Load Balancing. For more information, including information about granting IAM users required permissions for Amazon EC2 Auto Scaling actions, see the Amazon EC2 Auto Scaling User Guide\",\n loadSpec: \"aws/autoscaling\"\n }, {\n name: \"autoscaling-plans\",\n description: \"AWS Auto Scaling Use AWS Auto Scaling to create scaling plans for your applications to automatically scale your scalable AWS resources. 
API Summary You can use the AWS Auto Scaling service API to accomplish the following tasks: Create and manage scaling plans Define target tracking scaling policies to dynamically scale your resources based on utilization Scale Amazon EC2 Auto Scaling groups using predictive scaling and dynamic scaling to scale your Amazon EC2 capacity faster Set minimum and maximum capacity limits Retrieve information on existing scaling plans Access current forecast data and historical forecast data for up to 56 days previous To learn more about AWS Auto Scaling, including information about granting IAM users required permissions for AWS Auto Scaling actions, see the AWS Auto Scaling User Guide\",\n loadSpec: \"aws/autoscaling-plans\"\n }, {\n name: \"backup\",\n description: \"AWS Backup AWS Backup is a unified backup service designed to protect AWS services and their associated data. AWS Backup simplifies the creation, migration, restoration, and deletion of backups, while also providing reporting and auditing\",\n loadSpec: \"aws/backup\"\n }, {\n name: \"batch\",\n description: \"Using AWS Batch, you can run batch computing workloads on the AWS Cloud. Batch computing is a common means for developers, scientists, and engineers to access large amounts of compute resources. AWS Batch uses the advantages of this computing workload to remove the undifferentiated heavy lifting of configuring and managing required infrastructure. At the same time, it also adopts a familiar batch computing software approach. Given these advantages, AWS Batch can help you to efficiently provision resources in response to jobs submitted, thus effectively helping you to eliminate capacity constraints, reduce compute costs, and deliver your results more quickly. As a fully managed service, AWS Batch can run batch computing workloads of any scale. AWS Batch automatically provisions compute resources and optimizes workload distribution based on the quantity and scale of your specific workloads. With AWS Batch, there's no need to install or manage batch computing software. This means that you can focus your time and energy on analyzing results and solving your specific problems\",\n loadSpec: \"aws/batch\"\n }, {\n name: \"braket\",\n description: \"The Amazon Braket API Reference provides information about the operations and structures supported in Amazon Braket\",\n loadSpec: \"aws/braket\"\n }, {\n name: \"budgets\",\n description: \"The AWS Budgets API enables you to use AWS Budgets to plan your service usage, service costs, and instance reservations. The API reference provides descriptions, syntax, and usage examples for each of the actions and data types for AWS Budgets. Budgets provide you with a way to see the following information: How close your plan is to your budgeted amount or to the free tier limits Your usage-to-date, including how much you've used of your Reserved Instances (RIs) Your current estimated charges from AWS, and how much your predicted usage will accrue in charges by the end of the month How much of your budget has been used AWS updates your budget status several times a day. Budgets track your unblended costs, subscriptions, refunds, and RIs. You can create the following types of budgets: Cost budgets - Plan how much you want to spend on a service. Usage budgets - Plan how much you want to use one or more services. RI utilization budgets - Define a utilization threshold, and receive alerts when your RI usage falls below that threshold. This lets you see if your RIs are unused or under-utilized. 
RI coverage budgets - Define a coverage threshold, and receive alerts when the number of your instance hours that are covered by RIs fall below that threshold. This lets you see how much of your instance usage is covered by a reservation. Service Endpoint The AWS Budgets API provides the following endpoint: https://budgets.amazonaws.com For information about costs that are associated with the AWS Budgets API, see AWS Cost Management Pricing\",\n loadSpec: \"aws/budgets\"\n }, {\n name: \"ce\",\n description: \"The Cost Explorer API enables you to programmatically query your cost and usage data. You can query for aggregated data such as total monthly costs or total daily usage. You can also query for granular data, such as the number of daily write operations for Amazon DynamoDB database tables in your production environment. Service Endpoint The Cost Explorer API provides the following endpoint: https://ce.us-east-1.amazonaws.com For information about costs associated with the Cost Explorer API, see AWS Cost Management Pricing\",\n loadSpec: \"aws/ce\"\n }, {\n name: \"chime\",\n description: \"The Amazon Chime API (application programming interface) is designed for developers to perform key tasks, such as creating and managing Amazon Chime accounts, users, and Voice Connectors. This guide provides detailed information about the Amazon Chime API, including operations, types, inputs and outputs, and error codes. It also includes some server-side API actions to use with the Amazon Chime SDK. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide. You can use an AWS SDK, the AWS Command Line Interface (AWS CLI), or the REST API to make API calls. We recommend using an AWS SDK or the AWS CLI. Each API operation includes links to information about using it with a language-specific AWS SDK or the AWS CLI. Using an AWS SDK You don't need to write code to calculate a signature for request authentication. The SDK clients authenticate your requests by using access keys that you provide. For more information about AWS SDKs, see the AWS Developer Center. Using the AWS CLI Use your access keys with the AWS CLI to make API calls. For information about setting up the AWS CLI, see Installing the AWS Command Line Interface in the AWS Command Line Interface User Guide. For a list of available Amazon Chime commands, see the Amazon Chime commands in the AWS CLI Command Reference. Using REST APIs If you use REST to make API calls, you must authenticate your request by providing a signature. Amazon Chime supports signature version 4. For more information, see Signature Version 4 Signing Process in the Amazon Web Services General Reference. When making REST API calls, use the service name chime and REST endpoint https://service.chime.aws.amazon.com. Administrative permissions are controlled using AWS Identity and Access Management (IAM). For more information, see Identity and Access Management for Amazon Chime in the Amazon Chime Administration Guide\",\n loadSpec: \"aws/chime\"\n }, {\n name: \"cloud9\",\n description: \"AWS Cloud9 AWS Cloud9 is a collection of tools that you can use to code, build, run, test, debug, and release software in the cloud. For more information about AWS Cloud9, see the AWS Cloud9 User Guide. AWS Cloud9 supports these operations: CreateEnvironmentEC2: Creates an AWS Cloud9 development environment, launches an Amazon EC2 instance, and then connects from the instance to the environment. 
CreateEnvironmentMembership: Adds an environment member to an environment. DeleteEnvironment: Deletes an environment. If an Amazon EC2 instance is connected to the environment, also terminates the instance. DeleteEnvironmentMembership: Deletes an environment member from an environment. DescribeEnvironmentMemberships: Gets information about environment members for an environment. DescribeEnvironments: Gets information about environments. DescribeEnvironmentStatus: Gets status information for an environment. ListEnvironments: Gets a list of environment identifiers. ListTagsForResource: Gets the tags for an environment. TagResource: Adds tags to an environment. UntagResource: Removes tags from an environment. UpdateEnvironment: Changes the settings of an existing environment. UpdateEnvironmentMembership: Changes the settings of an existing environment member for an environment\",\n loadSpec: \"aws/cloud9\"\n }, {\n name: \"clouddirectory\",\n description: \"Amazon Cloud Directory Amazon Cloud Directory is a component of the AWS Directory Service that simplifies the development and management of cloud-scale web, mobile, and IoT applications. This guide describes the Cloud Directory operations that you can call programmatically and includes detailed information on data types and errors. For information about Cloud Directory features, see AWS Directory Service and the Amazon Cloud Directory Developer Guide\",\n loadSpec: \"aws/clouddirectory\"\n }, {\n name: \"cloudformation\",\n description: \"AWS CloudFormation AWS CloudFormation allows you to create and manage AWS infrastructure deployments predictably and repeatedly. You can use AWS CloudFormation to leverage AWS products, such as Amazon Elastic Compute Cloud, Amazon Elastic Block Store, Amazon Simple Notification Service, Elastic Load Balancing, and Auto Scaling to build highly-reliable, highly scalable, cost-effective applications without creating or configuring the underlying AWS infrastructure. With AWS CloudFormation, you declare all of your resources and dependencies in a template file. The template defines a collection of resources as a single unit called a stack. AWS CloudFormation creates and deletes all member resources of the stack together and manages all dependencies between the resources for you. For more information about AWS CloudFormation, see the AWS CloudFormation Product Page. Amazon CloudFormation makes use of other AWS products. If you need additional technical information about a specific AWS product, you can find the product's technical documentation at docs.aws.amazon.com\",\n loadSpec: \"aws/cloudformation\"\n }, {\n name: \"cloudfront\",\n description: \"Amazon CloudFront This is the Amazon CloudFront API Reference. This guide is for developers who need detailed information about CloudFront API actions, data types, and errors. For detailed information about CloudFront features, see the Amazon CloudFront Developer Guide\",\n loadSpec: \"aws/cloudfront\"\n }, {\n name: \"cloudhsm\",\n description: \"AWS CloudHSM Service This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference. 
For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference\",\n loadSpec: \"aws/cloudhsm\"\n }, {\n name: \"cloudhsmv2\",\n description: \"For more information about AWS CloudHSM, see AWS CloudHSM and the AWS CloudHSM User Guide\",\n loadSpec: \"aws/cloudhsmv2\"\n }, {\n name: \"cloudsearch\",\n description: \"Amazon CloudSearch Configuration Service You use the Amazon CloudSearch configuration service to create, configure, and manage search domains. Configuration service requests are submitted using the AWS Query protocol. AWS Query requests are HTTP or HTTPS requests submitted via HTTP GET or POST with a query parameter named Action. The endpoint for configuration service requests is region-specific: cloudsearch.region.amazonaws.com. For example, cloudsearch.us-east-1.amazonaws.com. For a current list of supported regions and endpoints, see Regions and Endpoints\",\n loadSpec: \"aws/cloudsearch\"\n }, {\n name: \"cloudsearchdomain\",\n description: \"You use the AmazonCloudSearch2013 API to upload documents to a search domain and search those documents. The endpoints for submitting UploadDocuments, Search, and Suggest requests are domain-specific. To get the endpoints for your domain, use the Amazon CloudSearch configuration service DescribeDomains action. The domain endpoints are also displayed on the domain dashboard in the Amazon CloudSearch console. You submit suggest requests to the search endpoint. For more information, see the Amazon CloudSearch Developer Guide\",\n loadSpec: \"aws/cloudsearchdomain\"\n }, {\n name: \"cloudtrail\",\n description: \"AWS CloudTrail This is the CloudTrail API Reference. It provides descriptions of actions, data types, common parameters, and common errors for CloudTrail. CloudTrail is a web service that records AWS API calls for your AWS account and delivers log files to an Amazon S3 bucket. The recorded information includes the identity of the user, the start time of the AWS API call, the source IP address, the request parameters, and the response elements returned by the service. As an alternative to the API, you can use one of the AWS SDKs, which consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient way to create programmatic access to AWS CloudTrail. For example, the SDKs take care of cryptographically signing requests, managing errors, and retrying requests automatically. For information about the AWS SDKs, including how to download and install them, see the Tools for Amazon Web Services page. See the AWS CloudTrail User Guide for information about the data that is included with each AWS API call listed in the log files\",\n loadSpec: \"aws/cloudtrail\"\n }, {\n name: \"cloudwatch\",\n description: \"Amazon CloudWatch monitors your Amazon Web Services (AWS) resources and the applications you run on AWS in real time. You can use CloudWatch to collect and track metrics, which are the variables you want to measure for your resources and applications. CloudWatch alarms send notifications or automatically change the resources you are monitoring based on rules that you define. For example, you can monitor the CPU usage and disk reads and writes of your Amazon EC2 instances. Then, use this data to determine whether you should launch additional instances to handle increased load. You can also use this data to stop under-used instances to save money.
In addition to monitoring the built-in metrics that come with AWS, you can monitor your own custom metrics. With CloudWatch, you gain system-wide visibility into resource utilization, application performance, and operational health\",\n loadSpec: \"aws/cloudwatch\"\n }, {\n name: \"codeartifact\",\n description: \"AWS CodeArtifact is a fully managed artifact repository compatible with language-native package managers and build tools such as npm, Apache Maven, and pip. You can use CodeArtifact to share packages with development teams and pull packages. Packages can be pulled from both public and CodeArtifact repositories. You can also create an upstream relationship between a CodeArtifact repository and another repository, which effectively merges their contents from the point of view of a package manager client. AWS CodeArtifact Components Use the information in this guide to help you work with the following CodeArtifact components: Repository: A CodeArtifact repository contains a set of package versions, each of which maps to a set of assets, or files. Repositories are polyglot, so a single repository can contain packages of any supported type. Each repository exposes endpoints for fetching and publishing packages using tools like the npm CLI, the Maven CLI ( mvn ), and pip . Domain: Repositories are aggregated into a higher-level entity known as a domain. All package assets and metadata are stored in the domain, but are consumed through repositories. A given package asset, such as a Maven JAR file, is stored once per domain, no matter how many repositories it's present in. All of the assets and metadata in a domain are encrypted with the same customer master key (CMK) stored in AWS Key Management Service (AWS KMS). Each repository is a member of a single domain and can't be moved to a different domain. The domain allows organizational policy to be applied across multiple repositories, such as which accounts can access repositories in the domain, and which public repositories can be used as sources of packages. Although an organization can have multiple domains, we recommend a single production domain that contains all published artifacts so that teams can find and share packages across their organization. Package: A package is a bundle of software and the metadata required to resolve dependencies and install the software. CodeArtifact supports npm, PyPI, and Maven package formats. In CodeArtifact, a package consists of: A name (for example, webpack is the name of a popular npm package) An optional namespace (for example, @types in @types/node) A set of versions (for example, 1.0.0, 1.0.1, 1.0.2, etc.) Package-level metadata (for example, npm tags) Package version: A version of a package, such as @types/node 12.6.9. The version number format and semantics vary for different package formats. For example, npm package versions must conform to the Semantic Versioning specification. In CodeArtifact, a package version consists of the version identifier, metadata at the package version level, and a set of assets. Upstream repository: One repository is upstream of another when the package versions in it can be accessed from the repository endpoint of the downstream repository, effectively merging the contents of the two repositories from the point of view of a client. CodeArtifact allows creating an upstream relationship between two repositories. Asset: An individual file stored in CodeArtifact associated with a package version, such as an npm .tgz file or Maven POM and JAR files. 
CodeArtifact supports these operations: AssociateExternalConnection: Adds an existing external connection to a repository. CopyPackageVersions: Copies package versions from one repository to another repository in the same domain. CreateDomain: Creates a domain. CreateRepository: Creates a CodeArtifact repository in a domain. DeleteDomain: Deletes a domain. You cannot delete a domain that contains repositories. DeleteDomainPermissionsPolicy: Deletes the resource policy that is set on a domain. DeletePackageVersions: Deletes versions of a package. After a package has been deleted, it can be republished, but its assets and metadata cannot be restored because they have been permanently removed from storage. DeleteRepository: Deletes a repository. DeleteRepositoryPermissionsPolicy: Deletes the resource policy that is set on a repository. DescribeDomain: Returns a DomainDescription object that contains information about the requested domain. DescribePackageVersion: Returns a PackageVersionDescription object that contains details about a package version. DescribeRepository: Returns a RepositoryDescription object that contains detailed information about the requested repository. DisposePackageVersions: Disposes versions of a package. A package version with the status Disposed cannot be restored because its assets have been permanently removed from storage. DisassociateExternalConnection: Removes an existing external connection from a repository. GetAuthorizationToken: Generates a temporary authorization token for accessing repositories in the domain. The token expires after the authorization period has passed. The default authorization period is 12 hours and can be customized to any length with a maximum of 12 hours. GetDomainPermissionsPolicy: Returns the policy of a resource that is attached to the specified domain. GetPackageVersionAsset: Returns the contents of an asset that is in a package version. GetPackageVersionReadme: Gets the readme file or descriptive text for a package version. GetRepositoryEndpoint: Returns the endpoint of a repository for a specific package format. A repository has one endpoint for each package format: npm pypi maven GetRepositoryPermissionsPolicy: Returns the resource policy that is set on a repository. ListDomains: Returns a list of DomainSummary objects. Each returned DomainSummary object contains information about a domain. ListPackages: Lists the packages in a repository. ListPackageVersionAssets: Lists the assets for a given package version. ListPackageVersionDependencies: Returns a list of the direct dependencies for a package version. ListPackageVersions: Returns a list of package versions for a specified package in a repository. ListRepositories: Returns a list of repositories owned by the AWS account that called this method. ListRepositoriesInDomain: Returns a list of the repositories in a domain. PutDomainPermissionsPolicy: Attaches a resource policy to a domain. PutRepositoryPermissionsPolicy: Sets the resource policy on a repository that specifies permissions to access it. UpdatePackageVersionsStatus: Updates the status of one or more versions of a package. UpdateRepository: Updates the properties of a repository\",\n loadSpec: \"aws/codeartifact\"\n }, {\n name: \"codebuild\",\n description: \"AWS CodeBuild AWS CodeBuild is a fully managed build service in the cloud. AWS CodeBuild compiles your source code, runs unit tests, and produces artifacts that are ready to deploy. AWS CodeBuild eliminates the need to provision, manage, and scale your own build servers.
It provides prepackaged build environments for the most popular programming languages and build tools, such as Apache Maven, Gradle, and more. You can also fully customize build environments in AWS CodeBuild to use your own build tools. AWS CodeBuild scales automatically to meet peak build requests. You pay only for the build time you consume. For more information about AWS CodeBuild, see the AWS CodeBuild User Guide\",\n loadSpec: \"aws/codebuild\"\n }, {\n name: \"codecommit\",\n description: \"AWS CodeCommit This is the AWS CodeCommit API Reference. This reference provides descriptions of the operations and data types for AWS CodeCommit API along with usage examples. You can use the AWS CodeCommit API to work with the following objects: Repositories, by calling the following: BatchGetRepositories, which returns information about one or more repositories associated with your AWS account. CreateRepository, which creates an AWS CodeCommit repository. DeleteRepository, which deletes an AWS CodeCommit repository. GetRepository, which returns information about a specified repository. ListRepositories, which lists all AWS CodeCommit repositories associated with your AWS account. UpdateRepositoryDescription, which sets or updates the description of the repository. UpdateRepositoryName, which changes the name of the repository. If you change the name of a repository, no other users of that repository can access it until you send them the new HTTPS or SSH URL to use. Branches, by calling the following: CreateBranch, which creates a branch in a specified repository. DeleteBranch, which deletes the specified branch in a repository unless it is the default branch. GetBranch, which returns information about a specified branch. ListBranches, which lists all branches for a specified repository. UpdateDefaultBranch, which changes the default branch for a repository. Files, by calling the following: DeleteFile, which deletes the content of a specified file from a specified branch. GetBlob, which returns the base-64 encoded content of an individual Git blob object in a repository. GetFile, which returns the base-64 encoded content of a specified file. GetFolder, which returns the contents of a specified folder or directory. PutFile, which adds or modifies a single file in a specified repository and branch. Commits, by calling the following: BatchGetCommits, which returns information about one or more commits in a repository. CreateCommit, which creates a commit for changes to a repository. GetCommit, which returns information about a commit, including commit messages and author and committer information. GetDifferences, which returns information about the differences in a valid commit specifier (such as a branch, tag, HEAD, commit ID, or other fully qualified reference). Merges, by calling the following: BatchDescribeMergeConflicts, which returns information about conflicts in a merge between commits in a repository. CreateUnreferencedMergeCommit, which creates an unreferenced commit between two branches or commits for the purpose of comparing them and identifying any potential conflicts. DescribeMergeConflicts, which returns information about merge conflicts between the base, source, and destination versions of a file in a potential merge. GetMergeCommit, which returns information about the merge between a source and destination commit. GetMergeConflicts, which returns information about merge conflicts between the source and destination branch in a pull request. 
GetMergeOptions, which returns information about the available merge options between two branches or commit specifiers. MergeBranchesByFastForward, which merges two branches using the fast-forward merge option. MergeBranchesBySquash, which merges two branches using the squash merge option. MergeBranchesByThreeWay, which merges two branches using the three-way merge option. Pull requests, by calling the following: CreatePullRequest, which creates a pull request in a specified repository. CreatePullRequestApprovalRule, which creates an approval rule for a specified pull request. DeletePullRequestApprovalRule, which deletes an approval rule for a specified pull request. DescribePullRequestEvents, which returns information about one or more pull request events. EvaluatePullRequestApprovalRules, which evaluates whether a pull request has met all the conditions specified in its associated approval rules. GetCommentsForPullRequest, which returns information about comments on a specified pull request. GetPullRequest, which returns information about a specified pull request. GetPullRequestApprovalStates, which returns information about the approval states for a specified pull request. GetPullRequestOverrideState, which returns information about whether approval rules have been set aside (overridden) for a pull request, and if so, the Amazon Resource Name (ARN) of the user or identity that overrode the rules and their requirements for the pull request. ListPullRequests, which lists all pull requests for a repository. MergePullRequestByFastForward, which merges the source destination branch of a pull request into the specified destination branch for that pull request using the fast-forward merge option. MergePullRequestBySquash, which merges the source destination branch of a pull request into the specified destination branch for that pull request using the squash merge option. MergePullRequestByThreeWay, which merges the source destination branch of a pull request into the specified destination branch for that pull request using the three-way merge option. OverridePullRequestApprovalRules, which sets aside all approval rule requirements for a pull request. PostCommentForPullRequest, which posts a comment to a pull request at the specified line, file, or request. UpdatePullRequestApprovalRuleContent, which updates the structure of an approval rule for a pull request. UpdatePullRequestApprovalState, which updates the state of an approval on a pull request. UpdatePullRequestDescription, which updates the description of a pull request. UpdatePullRequestStatus, which updates the status of a pull request. UpdatePullRequestTitle, which updates the title of a pull request. Approval rule templates, by calling the following: AssociateApprovalRuleTemplateWithRepository, which associates a template with a specified repository. After the template is associated with a repository, AWS CodeCommit creates approval rules that match the template conditions on every pull request created in the specified repository. BatchAssociateApprovalRuleTemplateWithRepositories, which associates a template with one or more specified repositories. After the template is associated with a repository, AWS CodeCommit creates approval rules that match the template conditions on every pull request created in the specified repositories.
BatchDisassociateApprovalRuleTemplateFromRepositories, which removes the association between a template and specified repositories so that approval rules based on the template are not automatically created when pull requests are created in those repositories. CreateApprovalRuleTemplate, which creates a template for approval rules that can then be associated with one or more repositories in your AWS account. DeleteApprovalRuleTemplate, which deletes the specified template. It does not remove approval rules on pull requests already created with the template. DisassociateApprovalRuleTemplateFromRepository, which removes the association between a template and a repository so that approval rules based on the template are not automatically created when pull requests are created in the specified repository. GetApprovalRuleTemplate, which returns information about an approval rule template. ListApprovalRuleTemplates, which lists all approval rule templates in the AWS Region in your AWS account. ListAssociatedApprovalRuleTemplatesForRepository, which lists all approval rule templates that are associated with a specified repository. ListRepositoriesForApprovalRuleTemplate, which lists all repositories associated with the specified approval rule template. UpdateApprovalRuleTemplateDescription, which updates the description of an approval rule template. UpdateApprovalRuleTemplateName, which updates the name of an approval rule template. UpdateApprovalRuleTemplateContent, which updates the content of an approval rule template. Comments in a repository, by calling the following: DeleteCommentContent, which deletes the content of a comment on a commit in a repository. GetComment, which returns information about a comment on a commit. GetCommentReactions, which returns information about emoji reactions to comments. GetCommentsForComparedCommit, which returns information about comments on the comparison between two commit specifiers in a repository. PostCommentForComparedCommit, which creates a comment on the comparison between two commit specifiers in a repository. PostCommentReply, which creates a reply to a comment. PutCommentReaction, which creates or updates an emoji reaction to a comment. UpdateComment, which updates the content of a comment on a commit in a repository. Tags used to tag resources in AWS CodeCommit (not Git tags), by calling the following: ListTagsForResource, which gets information about AWS tags for a specified Amazon Resource Name (ARN) in AWS CodeCommit. TagResource, which adds or updates tags for a resource in AWS CodeCommit. UntagResource, which removes tags for a resource in AWS CodeCommit. Triggers, by calling the following: GetRepositoryTriggers, which returns information about triggers configured for a repository. PutRepositoryTriggers, which replaces all triggers for a repository and can be used to create or delete triggers. TestRepositoryTriggers, which tests the functionality of a repository trigger by sending data to the trigger target. For information about how to use AWS CodeCommit, see the AWS CodeCommit User Guide\",\n loadSpec: \"aws/codecommit\"\n }, {\n name: \"codeguru-reviewer\",\n description: \"This section provides documentation for the Amazon CodeGuru Reviewer API operations. CodeGuru Reviewer is a service that uses program analysis and machine learning to detect potential defects that are difficult for developers to find and to provide recommendations to address them in your Java and Python code.
By proactively detecting and providing recommendations for addressing code defects and implementing best practices, CodeGuru Reviewer improves the overall quality and maintainability of your code base during the code review stage. For more information about CodeGuru Reviewer, see the Amazon CodeGuru Reviewer User Guide. To improve the security of your CodeGuru Reviewer API calls, you can establish a private connection between your VPC and CodeGuru Reviewer by creating an interface VPC endpoint. For more information, see CodeGuru Reviewer and interface VPC endpoints (AWS PrivateLink) in the Amazon CodeGuru Reviewer User Guide\",\n loadSpec: \"aws/codeguru-reviewer\"\n }, {\n name: \"codeguruprofiler\",\n description: \"This section provides documentation for the Amazon CodeGuru Profiler API operations. Amazon CodeGuru Profiler collects runtime performance data from your live applications, and provides recommendations that can help you fine-tune your application performance. Using machine learning algorithms, CodeGuru Profiler can help you find your most expensive lines of code and suggest ways you can improve efficiency and remove CPU bottlenecks. Amazon CodeGuru Profiler provides different visualizations of profiling data to help you identify what code is running on the CPU, see how much time is consumed, and suggest ways to reduce CPU utilization. Amazon CodeGuru Profiler currently supports applications written in all Java virtual machine (JVM) languages and Python. While CodeGuru Profiler supports both visualizations and recommendations for applications written in Java, it can also generate visualizations and a subset of recommendations for applications written in other JVM languages and Python. For more information, see What is Amazon CodeGuru Profiler in the Amazon CodeGuru Profiler User Guide\",\n loadSpec: \"aws/codeguruprofiler\"\n }, {\n name: \"codepipeline\",\n description: \"AWS CodePipeline Overview This is the AWS CodePipeline API Reference. This guide provides descriptions of the actions and data types for AWS CodePipeline. Some functionality for your pipeline can only be configured through the API. For more information, see the AWS CodePipeline User Guide. You can use the AWS CodePipeline API to work with pipelines, stages, actions, and transitions. Pipelines are models of automated release processes. Each pipeline is uniquely named, and consists of stages, actions, and transitions. You can work with pipelines by calling: CreatePipeline, which creates a uniquely named pipeline. DeletePipeline, which deletes the specified pipeline. GetPipeline, which returns information about the pipeline structure and pipeline metadata, including the pipeline Amazon Resource Name (ARN). GetPipelineExecution, which returns information about a specific execution of a pipeline. GetPipelineState, which returns information about the current state of the stages and actions of a pipeline. ListActionExecutions, which returns action-level details for past executions. The details include full stage and action-level details, including individual action duration, status, any errors that occurred during the execution, and input and output artifact location details. ListPipelines, which gets a summary of all of the pipelines associated with your account. ListPipelineExecutions, which gets a summary of the most recent executions for a pipeline. StartPipelineExecution, which runs the most recent revision of an artifact through the pipeline. 
StopPipelineExecution, which stops the specified pipeline execution from continuing through the pipeline. UpdatePipeline, which updates a pipeline with edits or changes to the structure of the pipeline. Pipelines include stages. Each stage contains one or more actions that must complete before the next stage begins. A stage results in success or failure. If a stage fails, the pipeline stops at that stage and remains stopped until either a new version of an artifact appears in the source location, or a user takes action to rerun the most recent artifact through the pipeline. You can call GetPipelineState, which displays the status of a pipeline, including the status of stages in the pipeline, or GetPipeline, which returns the entire structure of the pipeline, including the stages of that pipeline. For more information about the structure of stages and actions, see AWS CodePipeline Pipeline Structure Reference. Pipeline stages include actions that are categorized into categories such as source or build actions performed in a stage of a pipeline. For example, you can use a source action to import artifacts into a pipeline from a source such as Amazon S3. Like stages, you do not work with actions directly in most cases, but you do define and interact with actions when working with pipeline operations such as CreatePipeline and GetPipelineState. Valid action categories are: Source Build Test Deploy Approval Invoke Pipelines also include transitions, which allow the transition of artifacts from one stage to the next in a pipeline after the actions in one stage complete. You can work with transitions by calling: DisableStageTransition, which prevents artifacts from transitioning to the next stage in a pipeline. EnableStageTransition, which enables transition of artifacts between stages in a pipeline. Using the API to integrate with AWS CodePipeline For third-party integrators or developers who want to create their own integrations with AWS CodePipeline, the expected sequence varies from the standard API user. To integrate with AWS CodePipeline, developers need to work with the following items: Jobs, which are instances of an action. For example, a job for a source action might import a revision of an artifact from a source. You can work with jobs by calling: AcknowledgeJob, which confirms whether a job worker has received the specified job. GetJobDetails, which returns the details of a job. PollForJobs, which determines whether there are any jobs to act on. PutJobFailureResult, which provides details of a job failure. PutJobSuccessResult, which provides details of a job success. Third party jobs, which are instances of an action created by a partner action and integrated into AWS CodePipeline. Partner actions are created by members of the AWS Partner Network. You can work with third party jobs by calling: AcknowledgeThirdPartyJob, which confirms whether a job worker has received the specified job. GetThirdPartyJobDetails, which requests the details of a job for a partner action. PollForThirdPartyJobs, which determines whether there are any jobs to act on. PutThirdPartyJobFailureResult, which provides details of a job failure. PutThirdPartyJobSuccessResult, which provides details of a job success\",\n loadSpec: \"aws/codepipeline\"\n }, {\n name: \"codestar\",\n description: \"AWS CodeStar This is the API reference for AWS CodeStar. This reference provides descriptions of the operations and data types for the AWS CodeStar API along with usage examples. 
You can use the AWS CodeStar API to work with: Projects and their resources, by calling the following: DeleteProject, which deletes a project. DescribeProject, which lists the attributes of a project. ListProjects, which lists all projects associated with your AWS account. ListResources, which lists the resources associated with a project. ListTagsForProject, which lists the tags associated with a project. TagProject, which adds tags to a project. UntagProject, which removes tags from a project. UpdateProject, which updates the attributes of a project. Teams and team members, by calling the following: AssociateTeamMember, which adds an IAM user to the team for a project. DisassociateTeamMember, which removes an IAM user from the team for a project. ListTeamMembers, which lists all the IAM users in the team for a project, including their roles and attributes. UpdateTeamMember, which updates a team member's attributes in a project. Users, by calling the following: CreateUserProfile, which creates a user profile that contains data associated with the user across all projects. DeleteUserProfile, which deletes all user profile information across all projects. DescribeUserProfile, which describes the profile of a user. ListUserProfiles, which lists all user profiles. UpdateUserProfile, which updates the profile for a user\",\n loadSpec: \"aws/codestar\"\n }, {\n name: \"codestar-connections\",\n description: \"AWS CodeStar Connections This AWS CodeStar Connections API Reference provides descriptions and usage examples of the operations and data types for the AWS CodeStar Connections API. You can use the connections API to work with connections and installations. Connections are configurations that you use to connect AWS resources to external code repositories. Each connection is a resource that can be given to services such as CodePipeline to connect to a third-party repository such as Bitbucket. For example, you can add the connection in CodePipeline so that it triggers your pipeline when a code change is made to your third-party code repository. Each connection is named and associated with a unique ARN that is used to reference the connection. When you create a connection, the console initiates a third-party connection handshake. Installations are the apps that are used to conduct this handshake. For example, the installation for the Bitbucket provider type is the Bitbucket app. When you create a connection, you can choose an existing installation or create one. When you want to create a connection to an installed provider type such as GitHub Enterprise Server, you create a host for your connections. You can work with connections by calling: CreateConnection, which creates a uniquely named connection that can be referenced by services such as CodePipeline. DeleteConnection, which deletes the specified connection. GetConnection, which returns information about the connection, including the connection status. ListConnections, which lists the connections associated with your account. You can work with hosts by calling: CreateHost, which creates a host that represents the infrastructure where your provider is installed. DeleteHost, which deletes the specified host. GetHost, which returns information about the host, including the setup status. ListHosts, which lists the hosts associated with your account. 
You can work with tags in AWS CodeStar Connections by calling the following: ListTagsForResource, which gets information about AWS tags for a specified Amazon Resource Name (ARN) in AWS CodeStar Connections. TagResource, which adds or updates tags for a resource in AWS CodeStar Connections. UntagResource, which removes tags for a resource in AWS CodeStar Connections. For information about how to use AWS CodeStar Connections, see the Developer Tools User Guide\",\n loadSpec: \"aws/codestar-connections\"\n }, {\n name: \"codestar-notifications\",\n description: \"This AWS CodeStar Notifications API Reference provides descriptions and usage examples of the operations and data types for the AWS CodeStar Notifications API. You can use the AWS CodeStar Notifications API to work with the following objects: Notification rules, by calling the following: CreateNotificationRule, which creates a notification rule for a resource in your account. DeleteNotificationRule, which deletes a notification rule. DescribeNotificationRule, which provides information about a notification rule. ListNotificationRules, which lists the notification rules associated with your account. UpdateNotificationRule, which changes the name, events, or targets associated with a notification rule. Subscribe, which subscribes a target to a notification rule. Unsubscribe, which removes a target from a notification rule. Targets, by calling the following: DeleteTarget, which removes a notification rule target (SNS topic) from a notification rule. ListTargets, which lists the targets associated with a notification rule. Events, by calling the following: ListEventTypes, which lists the event types you can include in a notification rule. Tags, by calling the following: ListTagsForResource, which lists the tags already associated with a notification rule in your account. TagResource, which associates a tag you provide with a notification rule in your account. UntagResource, which removes a tag from a notification rule in your account. For information about how to use AWS CodeStar Notifications, see the AWS CodeStar Notifications User Guide\",\n loadSpec: \"aws/codestar-notifications\"\n }, {\n name: \"cognito-identity\",\n description: \"Amazon Cognito Federated Identities Amazon Cognito Federated Identities is a web service that delivers scoped temporary credentials to mobile devices and other untrusted environments. It uniquely identifies a device and supplies the user with a consistent identity over the lifetime of an application. Using Amazon Cognito Federated Identities, you can enable authentication with one or more third-party identity providers (Facebook, Google, or Login with Amazon) or an Amazon Cognito user pool, and you can also choose to support unauthenticated access from your app. Cognito delivers a unique identifier for each user and acts as an OpenID token provider trusted by AWS Security Token Service (STS) to access temporary, limited-privilege AWS credentials. For a description of the authentication flow from the Amazon Cognito Developer Guide see Authentication Flow. For more information see Amazon Cognito Federated Identities\",\n loadSpec: \"aws/cognito-identity\"\n }, {\n name: \"cognito-idp\",\n description: \"Using the Amazon Cognito User Pools API, you can create a user pool to manage directories and users. You can authenticate a user to obtain tokens related to user identity and access policies. This API reference provides information about user pools in Amazon Cognito User Pools.
For more information, see the Amazon Cognito Documentation\",\n loadSpec: \"aws/cognito-idp\"\n }, {\n name: \"cognito-sync\",\n description: \"Amazon Cognito Sync Amazon Cognito Sync provides an AWS service and client library that enable cross-device syncing of application-related user data. High-level client libraries are available for both iOS and Android. You can use these libraries to persist data locally so that it's available even if the device is offline. Developer credentials don't need to be stored on the mobile device to access the service. You can use Amazon Cognito to obtain a normalized user ID and credentials. User data is persisted in a dataset that can store up to 1 MB of key-value pairs, and you can have up to 20 datasets per user identity. With Amazon Cognito Sync, the data stored for each identity is accessible only to credentials assigned to that identity. In order to use the Cognito Sync service, you need to make API calls using credentials retrieved with Amazon Cognito Identity service. If you want to use Cognito Sync in an Android or iOS application, you will probably want to make API calls via the AWS Mobile SDK. To learn more, see the Developer Guide for Android and the Developer Guide for iOS\",\n loadSpec: \"aws/cognito-sync\"\n }, {\n name: \"comprehend\",\n description: \"Amazon Comprehend is an AWS service for gaining insight into the content of documents. Use these actions to determine the topics contained in your documents, the topics they discuss, the predominant sentiment expressed in them, the predominant language used, and more\",\n loadSpec: \"aws/comprehend\"\n }, {\n name: \"comprehendmedical\",\n description: \"Amazon Comprehend Medical extracts structured information from unstructured clinical text. Use these actions to gain insight in your documents\",\n loadSpec: \"aws/comprehendmedical\"\n }, {\n name: \"compute-optimizer\",\n description: \"AWS Compute Optimizer is a service that analyzes the configuration and utilization metrics of your AWS compute resources, such as EC2 instances, Auto Scaling groups, AWS Lambda functions, and Amazon EBS volumes. It reports whether your resources are optimal, and generates optimization recommendations to reduce the cost and improve the performance of your workloads. Compute Optimizer also provides recent utilization metric data, as well as projected utilization metric data for the recommendations, which you can use to evaluate which recommendation provides the best price-performance trade-off. The analysis of your usage patterns can help you decide when to move or resize your running resources, and still meet your performance and capacity requirements. For more information about Compute Optimizer, including the required permissions to use the service, see the AWS Compute Optimizer User Guide\",\n loadSpec: \"aws/compute-optimizer\"\n }, {\n name: \"connect\",\n description: \"Amazon Connect is a cloud-based contact center solution that you use to set up and manage a customer contact center and provide reliable customer engagement at any scale. Amazon Connect provides metrics and real-time reporting that enable you to optimize contact routing. You can also resolve customer issues more efficiently by getting customers in touch with the appropriate agents. There are limits to the number of Amazon Connect resources that you can create. There are also limits to the number of requests that you can make per second. For more information, see Amazon Connect Service Quotas in the Amazon Connect Administrator Guide. 
You can connect programmatically to an AWS service by using an endpoint. For a list of Amazon Connect endpoints, see Amazon Connect Endpoints. Working with contact flows? Check out the Amazon Connect Flow language\",\n loadSpec: \"aws/connect\"\n }, {\n name: \"connect-contact-lens\",\n description: \"Contact Lens for Amazon Connect enables you to analyze conversations between customers and agents, by using speech transcription, natural language processing, and intelligent search capabilities. It performs sentiment analysis, detects issues, and enables you to automatically categorize contacts. Contact Lens for Amazon Connect provides both real-time and post-call analytics of customer-agent conversations. For more information, see Analyze conversations using Contact Lens in the Amazon Connect Administrator Guide\",\n loadSpec: \"aws/connect-contact-lens\"\n }, {\n name: \"connectparticipant\",\n description: \"Amazon Connect is a cloud-based contact center solution that makes it easy to set up and manage a customer contact center and provide reliable customer engagement at any scale. Amazon Connect enables customer contacts through voice or chat. The APIs described here are used by chat participants, such as agents and customers\",\n loadSpec: \"aws/connectparticipant\"\n }, {\n name: \"cur\",\n description: \"The AWS Cost and Usage Report API enables you to programmatically create, query, and delete AWS Cost and Usage report definitions. AWS Cost and Usage reports track the monthly AWS costs and usage associated with your AWS account. The report contains line items for each unique combination of AWS product, usage type, and operation that your AWS account uses. You can configure the AWS Cost and Usage report to show only the data that you want, using the AWS Cost and Usage API. Service Endpoint The AWS Cost and Usage Report API provides the following endpoint: cur.us-east-1.amazonaws.com\",\n loadSpec: \"aws/cur\"\n }, {\n name: \"customer-profiles\",\n description: \"Amazon Connect Customer Profiles Welcome to the Amazon Connect Customer Profiles API Reference. This guide provides information about the Amazon Connect Customer Profiles API, including supported operations, data types, parameters, and schemas. Amazon Connect Customer Profiles is a unified customer profile for your contact center that has pre-built connectors powered by AppFlow that make it easy to combine customer information from third party applications, such as Salesforce (CRM), ServiceNow (ITSM), and your enterprise resource planning (ERP), with contact history from your Amazon Connect contact center. If you're new to Amazon Connect, you might find it helpful to also review the Amazon Connect Administrator Guide\",\n loadSpec: \"aws/customer-profiles\"\n }, {\n name: \"databrew\",\n description: \"AWS Glue DataBrew is a visual, cloud-scale data-preparation service. DataBrew simplifies data preparation tasks, targeting data issues that are hard to spot and time-consuming to fix. DataBrew empowers users of all technical levels to visualize the data and perform one-click data transformations, with no coding required\",\n loadSpec: \"aws/databrew\"\n }, {\n name: \"dataexchange\",\n description: \"AWS Data Exchange is a service that makes it easy for AWS customers to exchange data in the cloud. You can use the AWS Data Exchange APIs to create, update, manage, and access file-based data sets in the AWS Cloud. As a subscriber, you can view and access the data sets that you have an entitlement to through a subscription.
You can use the APIs to download or copy your entitled data sets to Amazon S3 for use across a variety of AWS analytics and machine learning services. As a provider, you can create and manage your data sets that you would like to publish to a product. Being able to package and provide your data sets into products requires a few steps to determine eligibility. For more information, visit the AWS Data Exchange User Guide. A data set is a collection of data that can be changed or updated over time. Data sets can be updated using revisions, which represent a new version or incremental change to a data set. A revision contains one or more assets. An asset in AWS Data Exchange is a piece of data that can be stored as an Amazon S3 object. The asset can be a structured data file, an image file, or some other data file. Jobs are asynchronous import or export operations used to create or copy assets\",\n loadSpec: \"aws/dataexchange\"\n }, {\n name: \"datapipeline\",\n description: \"AWS Data Pipeline configures and manages a data-driven workflow called a pipeline. AWS Data Pipeline handles the details of scheduling and ensuring that data dependencies are met so that your application can focus on processing the data. AWS Data Pipeline provides a JAR implementation of a task runner called AWS Data Pipeline Task Runner. AWS Data Pipeline Task Runner provides logic for common data management scenarios, such as performing database queries and running data analysis using Amazon Elastic MapReduce (Amazon EMR). You can use AWS Data Pipeline Task Runner as your task runner, or you can write your own task runner to provide custom data management. AWS Data Pipeline implements two main sets of functionality. Use the first set to create a pipeline and define data sources, schedules, dependencies, and the transforms to be performed on the data. Use the second set in your task runner application to receive the next task ready for processing. The logic for performing the task, such as querying the data, running data analysis, or converting the data from one format to another, is contained within the task runner. The task runner performs the task assigned to it by the web service, reporting progress to the web service as it does so. When the task is done, the task runner reports the final success or failure of the task to the web service\",\n loadSpec: \"aws/datapipeline\"\n }, {\n name: \"datasync\",\n description: \"AWS DataSync AWS DataSync is a managed data transfer service that makes it simpler for you to automate moving data between on-premises storage and Amazon Simple Storage Service (Amazon S3) or Amazon Elastic File System (Amazon EFS). This API interface reference for AWS DataSync contains documentation for a programming interface that you can use to manage AWS DataSync\",\n loadSpec: \"aws/datasync\"\n }, {\n name: \"dax\",\n description: \"DAX is a managed caching service engineered for Amazon DynamoDB. DAX dramatically speeds up database reads by caching frequently-accessed data from DynamoDB, so applications can access that data with sub-millisecond latency. You can create a DAX cluster easily, using the AWS Management Console.
With a few simple modifications to your code, your application can begin taking advantage of the DAX cluster and realize significant improvements in read performance\",\n loadSpec: \"aws/dax\"\n }, {\n name: \"detective\",\n description: 'Detective uses machine learning and purpose-built visualizations to help you analyze and investigate security issues across your Amazon Web Services (AWS) workloads. Detective automatically extracts time-based events such as login attempts, API calls, and network traffic from AWS CloudTrail and Amazon Virtual Private Cloud (Amazon VPC) flow logs. It also extracts findings detected by Amazon GuardDuty. The Detective API primarily supports the creation and management of behavior graphs. A behavior graph contains the extracted data from a set of member accounts, and is created and managed by an administrator account. Every behavior graph is specific to a Region. You can only use the API to manage graphs that belong to the Region that is associated with the currently selected endpoint. A Detective administrator account can use the Detective API to do the following: Enable and disable Detective. Enabling Detective creates a new behavior graph. View the list of member accounts in a behavior graph. Add member accounts to a behavior graph. Remove member accounts from a behavior graph. A member account can use the Detective API to do the following: View the list of behavior graphs that they are invited to. Accept an invitation to contribute to a behavior graph. Decline an invitation to contribute to a behavior graph. Remove their account from a behavior graph. All API actions are logged as CloudTrail events. See Logging Detective API Calls with CloudTrail. We replaced the term \"master account\" with the term \"administrator account.\" An administrator account is used to centrally manage multiple accounts. In the case of Detective, the administrator account manages the accounts in their behavior graph',\n loadSpec: \"aws/detective\"\n }, {\n name: \"devicefarm\",\n description: \"Welcome to the AWS Device Farm API documentation, which contains APIs for: Testing on desktop browsers Device Farm makes it possible for you to test your web applications on desktop browsers using Selenium. The APIs for desktop browser testing contain TestGrid in their names. For more information, see Testing Web Applications on Selenium with Device Farm. Testing on real mobile devices Device Farm makes it possible for you to test apps on physical phones, tablets, and other devices in the cloud. For more information, see the Device Farm Developer Guide\",\n loadSpec: \"aws/devicefarm\"\n }, {\n name: \"devops-guru\",\n description: \"Amazon DevOps Guru is a fully managed service that helps you identify anomalous behavior in business critical operational applications. You specify the AWS resources that you want DevOps Guru to cover, then the Amazon CloudWatch metrics and AWS CloudTrail events related to those resources are analyzed. When anomalous behavior is detected, DevOps Guru creates an insight that includes recommendations, related events, and related metrics that can help you improve your operational applications. For more information, see What is Amazon DevOps Guru. You can specify 1 or 2 Amazon Simple Notification Service topics so you are notified every time a new insight is created. You can also enable DevOps Guru to generate an OpsItem in AWS Systems Manager for each insight to help you manage and track your work addressing insights. 
To learn about the DevOps Guru workflow, see How DevOps Guru works. To learn about DevOps Guru concepts, see Concepts in DevOps Guru\",\n loadSpec: \"aws/devops-guru\"\n }, {\n name: \"directconnect\",\n description: \"AWS Direct Connect links your internal network to an AWS Direct Connect location over a standard Ethernet fiber-optic cable. One end of the cable is connected to your router, the other to an AWS Direct Connect router. With this connection in place, you can create virtual interfaces directly to the AWS cloud (for example, to Amazon EC2 and Amazon S3) and to Amazon VPC, bypassing Internet service providers in your network path. A connection provides access to all AWS Regions except the China (Beijing) and China (Ningxia) Regions. AWS resources in the China Regions can only be accessed through locations associated with those Regions\",\n loadSpec: \"aws/directconnect\"\n }, {\n name: \"discovery\",\n description: \"AWS Application Discovery Service AWS Application Discovery Service helps you plan application migration projects. It automatically identifies servers, virtual machines (VMs), and network dependencies in your on-premises data centers. For more information, see the AWS Application Discovery Service FAQ. Application Discovery Service offers three ways of performing discovery and collecting data about your on-premises servers: Agentless discovery is recommended for environments that use VMware vCenter Server. This mode doesn't require you to install an agent on each host. It does not work in non-VMware environments. Agentless discovery gathers server information regardless of the operating systems, which minimizes the time required for initial on-premises infrastructure assessment. Agentless discovery doesn't collect information about network dependencies; only agent-based discovery collects that information. Agent-based discovery collects a richer set of data than agentless discovery by using the AWS Application Discovery Agent, which you install on one or more hosts in your data center. The agent captures infrastructure and application information, including an inventory of running processes, system performance information, resource utilization, and network dependencies. The information collected by agents is secured at rest and in transit to the Application Discovery Service database in the cloud. AWS Partner Network (APN) solutions integrate with Application Discovery Service, enabling you to import details of your on-premises environment directly into Migration Hub without using the discovery connector or discovery agent. Third-party application discovery tools can query AWS Application Discovery Service, and they can write to the Application Discovery Service database using the public API. In this way, you can import data into Migration Hub and view it, so that you can associate applications with servers and track migrations. Recommendations We recommend that you use agent-based discovery for non-VMware environments, and whenever you want to collect information about network dependencies. You can run agent-based and agentless discovery simultaneously. Use agentless discovery to complete the initial infrastructure assessment quickly, and then install agents on select hosts to collect additional information. Working With This Guide This API reference provides descriptions, syntax, and usage examples for each of the actions and data types for Application Discovery Service. The topic for each action shows the API request parameters and the response. 
Alternatively, you can use one of the AWS SDKs to access an API that is tailored to the programming language or platform that you're using. For more information, see AWS SDKs. Remember that you must set your Migration Hub home region before you call any of these APIs. You must make API calls for write actions (create, notify, associate, disassociate, import, or put) while in your home region, or a HomeRegionNotSetException error is returned. API calls for read actions (list, describe, stop, and delete) are permitted outside of your home region. Although it is unlikely, the Migration Hub home region could change. If you call APIs outside the home region, an InvalidInputException is returned. You must call GetHomeRegion to obtain the latest Migration Hub home region. This guide is intended for use with the AWS Application Discovery Service User Guide. All data is handled according to the AWS Privacy Policy. You can operate Application Discovery Service offline to inspect collected data before it is shared with the service\",\n loadSpec: \"aws/discovery\"\n }, {\n name: \"dlm\",\n description: \"Amazon Data Lifecycle Manager With Amazon Data Lifecycle Manager, you can manage the lifecycle of your AWS resources. You create lifecycle policies, which are used to automate operations on the specified resources. Amazon DLM supports Amazon EBS volumes and snapshots. For information about using Amazon DLM with Amazon EBS, see Automating the Amazon EBS Snapshot Lifecycle in the Amazon EC2 User Guide\",\n loadSpec: \"aws/dlm\"\n }, {\n name: \"dms\",\n description: \"AWS Database Migration Service AWS Database Migration Service (AWS DMS) can migrate your data to and from the most widely used commercial and open-source databases such as Oracle, PostgreSQL, Microsoft SQL Server, Amazon Redshift, MariaDB, Amazon Aurora, MySQL, and SAP Adaptive Server Enterprise (ASE). The service supports homogeneous migrations such as Oracle to Oracle, as well as heterogeneous migrations between different database platforms, such as Oracle to MySQL or SQL Server to PostgreSQL. For more information about AWS DMS, see What Is AWS Database Migration Service? in the AWS Database Migration User Guide\",\n loadSpec: \"aws/dms\"\n }, {\n name: \"docdb\",\n description: \"Amazon DocumentDB API documentation\",\n loadSpec: \"aws/docdb\"\n }, {\n name: \"ds\",\n description: \"AWS Directory Service AWS Directory Service is a web service that makes it easy for you to set up and run directories in the AWS cloud, or connect your AWS resources with an existing on-premises Microsoft Active Directory. This guide provides detailed information about AWS Directory Service operations, data types, parameters, and errors. For information about AWS Directory Service features, see AWS Directory Service and the AWS Directory Service Administration Guide. AWS provides SDKs that consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .Net, iOS, Android, etc.). The SDKs provide a convenient way to create programmatic access to AWS Directory Service and other AWS services. For more information about the AWS SDKs, including how to download and install them, see Tools for Amazon Web Services\",\n loadSpec: \"aws/ds\"\n }, {\n name: \"dynamodb\",\n description: \"Amazon DynamoDB Amazon DynamoDB is a fully managed NoSQL database service that provides fast and predictable performance with seamless scalability. 
DynamoDB lets you offload the administrative burdens of operating and scaling a distributed database, so that you don't have to worry about hardware provisioning, setup and configuration, replication, software patching, or cluster scaling. With DynamoDB, you can create database tables that can store and retrieve any amount of data, and serve any level of request traffic. You can scale up or scale down your tables' throughput capacity without downtime or performance degradation, and use the AWS Management Console to monitor resource utilization and performance metrics. DynamoDB automatically spreads the data and traffic for your tables over a sufficient number of servers to handle your throughput and storage requirements, while maintaining consistent and fast performance. All of your data is stored on solid state disks (SSDs) and automatically replicated across multiple Availability Zones in an AWS region, providing built-in high availability and data durability\",\n loadSpec: \"aws/dynamodb\"\n }, {\n name: \"dynamodbstreams\",\n description: \"Amazon DynamoDB Amazon DynamoDB Streams provides API actions for accessing streams and processing stream records. To learn more about application development with Streams, see Capturing Table Activity with DynamoDB Streams in the Amazon DynamoDB Developer Guide\",\n loadSpec: \"aws/dynamodbstreams\"\n }, {\n name: \"ebs\",\n description: \"You can use the Amazon Elastic Block Store (Amazon EBS) direct APIs to create EBS snapshots, write data directly to your snapshots, read data on your snapshots, and identify the differences or changes between two snapshots. If you\\u2019re an independent software vendor (ISV) who offers backup services for Amazon EBS, the EBS direct APIs make it more efficient and cost-effective to track incremental changes on your EBS volumes through snapshots. This can be done without having to create new volumes from snapshots, and then use Amazon Elastic Compute Cloud (Amazon EC2) instances to compare the differences. You can create incremental snapshots directly from data on-premises into EBS volumes and the cloud to use for quick disaster recovery. With the ability to write and read snapshots, you can write your on-premises data to an EBS snapshot during a disaster. Then after recovery, you can restore it back to AWS or on-premises from the snapshot. You no longer need to build and maintain complex mechanisms to copy data to and from Amazon EBS. This API reference provides detailed information about the actions, data types, parameters, and errors of the EBS direct APIs. For more information about the elements that make up the EBS direct APIs, and examples of how to use them effectively, see Accessing the Contents of an EBS Snapshot in the Amazon Elastic Compute Cloud User Guide. For more information about the supported AWS Regions, endpoints, and service quotas for the EBS direct APIs, see Amazon Elastic Block Store Endpoints and Quotas in the AWS General Reference\",\n loadSpec: \"aws/ebs\"\n }, {\n name: \"ec2\",\n description: \"Amazon Elastic Compute Cloud Amazon Elastic Compute Cloud (Amazon EC2) provides secure and resizable computing capacity in the AWS Cloud. Using Amazon EC2 eliminates the need to invest in hardware up front, so you can develop and deploy applications faster. Amazon Virtual Private Cloud (Amazon VPC) enables you to provision a logically isolated section of the AWS Cloud where you can launch AWS resources in a virtual network that you've defined. 
Amazon Elastic Block Store (Amazon EBS) provides block level storage volumes for use with EC2 instances. EBS volumes are highly available and reliable storage volumes that can be attached to any running instance and used like a hard drive. To learn more, see the following resources: Amazon EC2: Amazon EC2 product page, Amazon EC2 documentation Amazon EBS: Amazon EBS product page, Amazon EBS documentation Amazon VPC: Amazon VPC product page, Amazon VPC documentation AWS VPN: AWS VPN product page, AWS VPN documentation\",\n loadSpec: \"aws/ec2\"\n }, {\n name: \"ec2-instance-connect\",\n description: \"Amazon EC2 Instance Connect enables system administrators to publish one-time use SSH public keys to EC2, providing users a simple and secure way to connect to their instances\",\n loadSpec: \"aws/ec2-instance-connect\"\n }, {\n name: \"ecr\",\n description: \"Amazon Elastic Container Registry Amazon Elastic Container Registry (Amazon ECR) is a managed container image registry service. Customers can use the familiar Docker CLI, or their preferred client, to push, pull, and manage images. Amazon ECR provides a secure, scalable, and reliable registry for your Docker or Open Container Initiative (OCI) images. Amazon ECR supports private repositories with resource-based permissions using IAM so that specific users or Amazon EC2 instances can access repositories and images\",\n loadSpec: \"aws/ecr\"\n }, {\n name: \"ecr-public\",\n description: \"Amazon Elastic Container Registry Public Amazon Elastic Container Registry (Amazon ECR) is a managed container image registry service. Amazon ECR provides both public and private registries to host your container images. You can use the familiar Docker CLI, or your preferred client, to push, pull, and manage images. Amazon ECR provides a secure, scalable, and reliable registry for your Docker or Open Container Initiative (OCI) images. Amazon ECR supports public repositories with this API. For information about the Amazon ECR API for private repositories, see Amazon Elastic Container Registry API Reference\",\n loadSpec: \"aws/ecr-public\"\n }, {\n name: \"ecs\",\n description: \"Amazon Elastic Container Service Amazon Elastic Container Service (Amazon ECS) is a highly scalable, fast, container management service that makes it easy to run, stop, and manage Docker containers on a cluster. You can host your cluster on a serverless infrastructure that is managed by Amazon ECS by launching your services or tasks on AWS Fargate. For more control, you can host your tasks on a cluster of Amazon Elastic Compute Cloud (Amazon EC2) instances that you manage. Amazon ECS makes it easy to launch and stop container-based applications with simple API calls, allows you to get the state of your cluster from a centralized service, and gives you access to many familiar Amazon EC2 features. You can use Amazon ECS to schedule the placement of containers across your cluster based on your resource needs, isolation policies, and availability requirements. Amazon ECS eliminates the need for you to operate your own cluster management and configuration management systems or worry about scaling your management infrastructure\",\n loadSpec: \"aws/ecs\"\n }, {\n name: \"efs\",\n description: \"Amazon Elastic File System Amazon Elastic File System (Amazon EFS) provides simple, scalable file storage for use with Amazon EC2 instances in the AWS Cloud. 
With Amazon EFS, storage capacity is elastic, growing and shrinking automatically as you add and remove files, so your applications have the storage they need, when they need it. For more information, see the User Guide\",\n loadSpec: \"aws/efs\"\n }, {\n name: \"eks\",\n description: \"Amazon Elastic Kubernetes Service (Amazon EKS) is a managed service that makes it easy for you to run Kubernetes on AWS without needing to stand up or maintain your own Kubernetes control plane. Kubernetes is an open-source system for automating the deployment, scaling, and management of containerized applications. Amazon EKS runs up-to-date versions of the open-source Kubernetes software, so you can use all the existing plugins and tooling from the Kubernetes community. Applications running on Amazon EKS are fully compatible with applications running on any standard Kubernetes environment, whether running in on-premises data centers or public clouds. This means that you can easily migrate any standard Kubernetes application to Amazon EKS without any code modification required\",\n loadSpec: \"aws/eks\"\n }, {\n name: \"elastic-inference\",\n description: \"Elastic Inference public APIs\",\n loadSpec: \"aws/elastic-inference\"\n }, {\n name: \"elasticache\",\n description: \"Amazon ElastiCache Amazon ElastiCache is a web service that makes it easier to set up, operate, and scale a distributed cache in the cloud. With ElastiCache, customers get all of the benefits of a high-performance, in-memory cache with less of the administrative burden involved in launching and managing a distributed cache. The service makes setup, scaling, and cluster failure handling much simpler than in a self-managed cache deployment. In addition, through integration with Amazon CloudWatch, customers get enhanced visibility into the key performance statistics associated with their cache and can receive alarms if a part of their cache runs hot\",\n loadSpec: \"aws/elasticache\"\n }, {\n name: \"elasticbeanstalk\",\n description: \"AWS Elastic Beanstalk AWS Elastic Beanstalk makes it easy for you to create, deploy, and manage scalable, fault-tolerant applications running on the Amazon Web Services cloud. For more information about this product, go to the AWS Elastic Beanstalk details page. The location of the latest AWS Elastic Beanstalk WSDL is https://elasticbeanstalk.s3.amazonaws.com/doc/2010-12-01/AWSElasticBeanstalk.wsdl. To install the Software Development Kits (SDKs), Integrated Development Environment (IDE) Toolkits, and command line tools that enable you to access the API, go to Tools for Amazon Web Services. Endpoints For a list of region-specific endpoints that AWS Elastic Beanstalk supports, go to Regions and Endpoints in the Amazon Web Services Glossary\",\n loadSpec: \"aws/elasticbeanstalk\"\n }, {\n name: \"elastictranscoder\",\n description: \"AWS Elastic Transcoder Service The AWS Elastic Transcoder Service\",\n loadSpec: \"aws/elastictranscoder\"\n }, {\n name: \"elb\",\n description: \"Elastic Load Balancing A load balancer can distribute incoming traffic across your EC2 instances. This enables you to increase the availability of your application. The load balancer also monitors the health of its registered instances and ensures that it routes traffic only to healthy instances. 
You configure your load balancer to accept incoming traffic by specifying one or more listeners, which are configured with a protocol and port number for connections from clients to the load balancer and a protocol and port number for connections from the load balancer to the instances. Elastic Load Balancing supports three types of load balancers: Application Load Balancers, Network Load Balancers, and Classic Load Balancers. You can select a load balancer based on your application needs. For more information, see the Elastic Load Balancing User Guide. This reference covers the 2012-06-01 API, which supports Classic Load Balancers. The 2015-12-01 API supports Application Load Balancers and Network Load Balancers. To get started, create a load balancer with one or more listeners using CreateLoadBalancer. Register your instances with the load balancer using RegisterInstancesWithLoadBalancer. All Elastic Load Balancing operations are idempotent, which means that they complete at most one time. If you repeat an operation, it succeeds with a 200 OK response code\",\n loadSpec: \"aws/elb\"\n }, {\n name: \"elbv2\",\n description: \"Elastic Load Balancing A load balancer distributes incoming traffic across targets, such as your EC2 instances. This enables you to increase the availability of your application. The load balancer also monitors the health of its registered targets and ensures that it routes traffic only to healthy targets. You configure your load balancer to accept incoming traffic by specifying one or more listeners, which are configured with a protocol and port number for connections from clients to the load balancer. You configure a target group with a protocol and port number for connections from the load balancer to the targets, and with health check settings to be used when checking the health status of the targets. Elastic Load Balancing supports the following types of load balancers: Application Load Balancers, Network Load Balancers, Gateway Load Balancers, and Classic Load Balancers. This reference covers the following load balancer types: Application Load Balancer - Operates at the application layer (layer 7) and supports HTTP and HTTPS. Network Load Balancer - Operates at the transport layer (layer 4) and supports TCP, TLS, and UDP. Gateway Load Balancer - Operates at the network layer (layer 3). For more information, see the Elastic Load Balancing User Guide. All Elastic Load Balancing operations are idempotent, which means that they complete at most one time. If you repeat an operation, it succeeds\",\n loadSpec: \"aws/elbv2\"\n }, {\n name: \"emr\",\n description: \"Amazon EMR is a web service that makes it easier to process large amounts of data efficiently. Amazon EMR uses Hadoop processing combined with several AWS services to do tasks such as web indexing, data mining, log file analysis, machine learning, scientific simulation, and data warehouse management\",\n loadSpec: \"aws/emr\"\n }, {\n name: \"emr-containers\",\n description: 'Amazon EMR on EKS provides a deployment option for Amazon EMR that allows you to run open-source big data frameworks on Amazon Elastic Kubernetes Service (Amazon EKS). With this deployment option, you can focus on running analytics workloads while Amazon EMR on EKS builds, configures, and manages containers for open-source applications. For more information about Amazon EMR on EKS concepts and tasks, see What is Amazon EMR on EKS. Amazon EMR containers is the API name for Amazon EMR on EKS. 
The emr-containers prefix is used in the following scenarios: It is the prefix in the CLI commands for Amazon EMR on EKS. For example, aws emr-containers start-job-run. It is the prefix before IAM policy actions for Amazon EMR on EKS. For example, \"Action\": [ \"emr-containers:StartJobRun\"]. For more information, see Policy actions for Amazon EMR on EKS. It is the prefix used in Amazon EMR on EKS service endpoints. For example, emr-containers.us-east-2.amazonaws.com. For more information, see Amazon EMR on EKS Service Endpoints',\n loadSpec: \"aws/emr-containers\"\n }, {\n name: \"es\",\n description: \"Amazon Elasticsearch Configuration Service Use the Amazon Elasticsearch Configuration API to create, configure, and manage Elasticsearch domains. For sample code that uses the Configuration API, see the Amazon Elasticsearch Service Developer Guide. The guide also contains sample code for sending signed HTTP requests to the Elasticsearch APIs. The endpoint for configuration service requests is region-specific: es.region.amazonaws.com. For example, es.us-east-1.amazonaws.com. For a current list of supported regions and endpoints, see Regions and Endpoints\",\n loadSpec: \"aws/es\"\n }, {\n name: \"events\",\n description: \"Amazon EventBridge helps you to respond to state changes in your AWS resources. When your resources change state, they automatically send events into an event stream. You can create rules that match selected events in the stream and route them to targets to take action. You can also use rules to take action on a predetermined schedule. For example, you can configure rules to: Automatically invoke an AWS Lambda function to update DNS entries when an event notifies you that an Amazon EC2 instance enters the running state. Direct specific API records from AWS CloudTrail to an Amazon Kinesis data stream for detailed analysis of potential security or availability risks. Periodically invoke a built-in target to create a snapshot of an Amazon EBS volume. For more information about the features of Amazon EventBridge, see the Amazon EventBridge User Guide\",\n loadSpec: \"aws/events\"\n }, {\n name: \"firehose\",\n description: \"Amazon Kinesis Data Firehose API Reference Amazon Kinesis Data Firehose is a fully managed service that delivers real-time streaming data to destinations such as Amazon Simple Storage Service (Amazon S3), Amazon Elasticsearch Service (Amazon ES), Amazon Redshift, and Splunk\",\n loadSpec: \"aws/firehose\"\n }, {\n name: \"fis\",\n description: \"AWS Fault Injection Simulator is a managed service that enables you to perform fault injection experiments on your AWS workloads. For more information, see the AWS Fault Injection Simulator User Guide\",\n loadSpec: \"aws/fis\"\n }, {\n name: \"fms\",\n description: \"AWS Firewall Manager This is the AWS Firewall Manager API Reference. This guide is for developers who need detailed information about the AWS Firewall Manager API actions, data types, and errors. For detailed information about AWS Firewall Manager features, see the AWS Firewall Manager Developer Guide. Some API actions require explicit resource permissions. 
For information, see the developer guide topic Firewall Manager required permissions for API actions\",\n loadSpec: \"aws/fms\"\n }, {\n name: \"forecast\",\n description: \"Provides APIs for creating and managing Amazon Forecast resources\",\n loadSpec: \"aws/forecast\"\n }, {\n name: \"forecastquery\",\n description: \"Provides APIs for creating and managing Amazon Forecast resources\",\n loadSpec: \"aws/forecastquery\"\n }, {\n name: \"frauddetector\",\n description: \"This is the Amazon Fraud Detector API Reference. This guide is for developers who need detailed information about Amazon Fraud Detector API actions, data types, and errors. For more information about Amazon Fraud Detector features, see the Amazon Fraud Detector User Guide\",\n loadSpec: \"aws/frauddetector\"\n }, {\n name: \"fsx\",\n description: \"Amazon FSx is a fully managed service that makes it easy for storage and application administrators to launch and use shared file storage\",\n loadSpec: \"aws/fsx\"\n }, {\n name: \"gamelift\",\n description: \"Amazon GameLift Service GameLift provides solutions for hosting session-based multiplayer game servers in the cloud, including tools for deploying, operating, and scaling game servers. Built on AWS global computing infrastructure, GameLift helps you deliver high-performance, high-reliability, low-cost game servers while dynamically scaling your resource usage to meet player demand. About GameLift solutions Get more information on these GameLift solutions in the GameLift Developer Guide. GameLift managed hosting -- GameLift offers a fully managed service to set up and maintain computing machines for hosting, manage game session and player session life cycle, and handle security, storage, and performance tracking. You can use automatic scaling tools to balance player demand and hosting costs, configure your game session management to minimize player latency, and add FlexMatch for matchmaking. Managed hosting with Realtime Servers -- With GameLift Realtime Servers, you can quickly configure and set up ready-to-go game servers for your game. Realtime Servers provides a game server framework with core GameLift infrastructure already built in. Then use the full range of GameLift managed hosting features, including FlexMatch, for your game. GameLift FleetIQ -- Use GameLift FleetIQ as a standalone service while hosting your games using EC2 instances and Auto Scaling groups. GameLift FleetIQ provides optimizations for game hosting, including boosting the viability of low-cost Spot Instances gaming. For a complete solution, pair the GameLift FleetIQ and FlexMatch standalone services. GameLift FlexMatch -- Add matchmaking to your game hosting solution. FlexMatch is a customizable matchmaking service for multiplayer games. Use FlexMatch as integrated with GameLift managed hosting or incorporate FlexMatch as a standalone service into your own hosting solution. About this API Reference This reference guide describes the low-level service API for Amazon GameLift. With each topic in this guide, you can find links to language-specific SDK guides and the AWS CLI reference. Useful links: GameLift API operations listed by tasks GameLift tools and resources\",\n loadSpec: \"aws/gamelift\"\n }, {\n name: \"glacier\",\n description: \"Amazon S3 Glacier (Glacier) is a storage solution for \\\"cold data.\\\" Glacier is an extremely low-cost storage service that provides secure, durable, and easy-to-use storage for data backup and archival. 
With Glacier, customers can store their data cost effectively for months, years, or decades. Glacier also enables customers to offload the administrative burdens of operating and scaling storage to AWS, so they don't have to worry about capacity planning, hardware provisioning, data replication, hardware failure and recovery, or time-consuming hardware migrations. Glacier is a great storage choice when low storage cost is paramount and your data is rarely retrieved. If your application requires fast or frequent access to your data, consider using Amazon S3. For more information, see Amazon Simple Storage Service (Amazon S3). You can store any kind of data in any format. There is no maximum limit on the total amount of data you can store in Glacier. If you are a first-time user of Glacier, we recommend that you begin by reading the following sections in the Amazon S3 Glacier Developer Guide: What is Amazon S3 Glacier - This section of the Developer Guide describes the underlying data model, the operations it supports, and the AWS SDKs that you can use to interact with the service. Getting Started with Amazon S3 Glacier - The Getting Started section walks you through the process of creating a vault, uploading archives, creating jobs to download archives, retrieving the job output, and deleting archives\",\n loadSpec: \"aws/glacier\"\n }, {\n name: \"globalaccelerator\",\n description: \"AWS Global Accelerator This is the AWS Global Accelerator API Reference. This guide is for developers who need detailed information about AWS Global Accelerator API actions, data types, and errors. For more information about Global Accelerator features, see the AWS Global Accelerator Developer Guide. AWS Global Accelerator is a service in which you create accelerators to improve the performance of your applications for local and global users. Depending on the type of accelerator you choose, you can gain additional benefits. By using a standard accelerator, you can improve availability of your internet applications that are used by a global audience. With a standard accelerator, Global Accelerator directs traffic to optimal endpoints over the AWS global network. For other scenarios, you might choose a custom routing accelerator. With a custom routing accelerator, you can use application logic to directly map one or more users to a specific endpoint among many endpoints. Global Accelerator is a global service that supports endpoints in multiple AWS Regions but you must specify the US West (Oregon) Region to create or update accelerators. By default, Global Accelerator provides you with two static IP addresses that you associate with your accelerator. With a standard accelerator, instead of using the IP addresses that Global Accelerator provides, you can configure these entry points to be IPv4 addresses from your own IP address ranges that you bring to Global Accelerator. The static IP addresses are anycast from the AWS edge network. For a standard accelerator, they distribute incoming application traffic across multiple endpoint resources in multiple AWS Regions, which increases the availability of your applications. Endpoints for standard accelerators can be Network Load Balancers, Application Load Balancers, Amazon EC2 instances, or Elastic IP addresses that are located in one AWS Region or multiple Regions. For custom routing accelerators, you map traffic that arrives to the static IP addresses to specific Amazon EC2 servers in endpoints that are virtual private cloud (VPC) subnets. 
The static IP addresses remain assigned to your accelerator for as long as it exists, even if you disable the accelerator and it no longer accepts or routes traffic. However, when you delete an accelerator, you lose the static IP addresses that are assigned to it, so you can no longer route traffic by using them. You can use IAM policies like tag-based permissions with Global Accelerator to limit the users who have permissions to delete an accelerator. For more information, see Tag-based policies. For standard accelerators, Global Accelerator uses the AWS global network to route traffic to the optimal regional endpoint based on health, client location, and policies that you configure. The service reacts instantly to changes in health or configuration to ensure that internet traffic from clients is always directed to healthy endpoints. For a list of the AWS Regions where Global Accelerator and other services are currently supported, see the AWS Region Table. AWS Global Accelerator includes the following components: Static IP addresses Global Accelerator provides you with a set of two static IP addresses that are anycast from the AWS edge network. If you bring your own IP address range to AWS (BYOIP) to use with a standard accelerator, you can instead assign IP addresses from your own pool to use with your accelerator. For more information, see Bring your own IP addresses (BYOIP) in AWS Global Accelerator. The IP addresses serve as single fixed entry points for your clients. If you already have Elastic Load Balancing load balancers, Amazon EC2 instances, or Elastic IP address resources set up for your applications, you can easily add those to a standard accelerator in Global Accelerator. This allows Global Accelerator to use static IP addresses to access the resources. The static IP addresses remain assigned to your accelerator for as long as it exists, even if you disable the accelerator and it no longer accepts or routes traffic. However, when you delete an accelerator, you lose the static IP addresses that are assigned to it, so you can no longer route traffic by using them. You can use IAM policies like tag-based permissions with Global Accelerator to delete an accelerator. For more information, see Tag-based policies. Accelerator An accelerator directs traffic to endpoints over the AWS global network to improve the performance of your internet applications. Each accelerator includes one or more listeners. There are two types of accelerators: A standard accelerator directs traffic to the optimal AWS endpoint based on several factors, including the user\\u2019s location, the health of the endpoint, and the endpoint weights that you configure. This improves the availability and performance of your applications. Endpoints can be Network Load Balancers, Application Load Balancers, Amazon EC2 instances, or Elastic IP addresses. A custom routing accelerator directs traffic to one of possibly thousands of Amazon EC2 instances running in a single or multiple virtual private clouds (VPCs). With custom routing, listener ports are mapped to statically associate port ranges with VPC subnets, which allows Global Accelerator to determine an EC2 instance IP address at the time of connection. By default, all port mapping destinations in a VPC subnet can't receive traffic. You can choose to configure all destinations in the subnet to receive traffic, or to specify individual port mappings that can receive traffic. For more information, see Types of accelerators. 
DNS name Global Accelerator assigns each accelerator a default Domain Name System (DNS) name, similar to a1234567890abcdef.awsglobalaccelerator.com, that points to the static IP addresses that Global Accelerator assigns to you or that you choose from your own IP address range. Depending on the use case, you can use your accelerator's static IP addresses or DNS name to route traffic to your accelerator, or set up DNS records to route traffic using your own custom domain name. Network zone A network zone services the static IP addresses for your accelerator from a unique IP subnet. Similar to an AWS Availability Zone, a network zone is an isolated unit with its own set of physical infrastructure. When you configure an accelerator, by default, Global Accelerator allocates two IPv4 addresses for it. If one IP address from a network zone becomes unavailable due to IP address blocking by certain client networks, or network disruptions, then client applications can retry on the healthy static IP address from the other isolated network zone. Listener A listener processes inbound connections from clients to Global Accelerator, based on the port (or port range) and protocol (or protocols) that you configure. A listener can be configured for TCP, UDP, or both TCP and UDP protocols. Each listener has one or more endpoint groups associated with it, and traffic is forwarded to endpoints in one of the groups. You associate endpoint groups with listeners by specifying the Regions that you want to distribute traffic to. With a standard accelerator, traffic is distributed to optimal endpoints within the endpoint groups associated with a listener. Endpoint group Each endpoint group is associated with a specific AWS Region. Endpoint groups include one or more endpoints in the Region. With a standard accelerator, you can increase or reduce the percentage of traffic that would be otherwise directed to an endpoint group by adjusting a setting called a traffic dial. The traffic dial lets you easily do performance testing or blue/green deployment testing, for example, for new releases across different AWS Regions. Endpoint An endpoint is a resource that Global Accelerator directs traffic to. Endpoints for standard accelerators can be Network Load Balancers, Application Load Balancers, Amazon EC2 instances, or Elastic IP addresses. An Application Load Balancer endpoint can be internet-facing or internal. Traffic for standard accelerators is routed to endpoints based on the health of the endpoint along with configuration options that you choose, such as endpoint weights. For each endpoint, you can configure weights, which are numbers that you can use to specify the proportion of traffic to route to each one. This can be useful, for example, to do performance testing within a Region. Endpoints for custom routing accelerators are virtual private cloud (VPC) subnets with one or many EC2 instances\",\n loadSpec: \"aws/globalaccelerator\"\n }, {\n name: \"glue\",\n description: \"AWS Glue Defines the public endpoint for the AWS Glue service\",\n loadSpec: \"aws/glue\"\n }, {\n name: \"greengrass\",\n description: \"AWS IoT Greengrass seamlessly extends AWS onto physical devices so they can act locally on the data they generate, while still using the cloud for management, analytics, and durable storage. AWS IoT Greengrass ensures your devices can respond quickly to local events and operate with intermittent connectivity. 
AWS IoT Greengrass minimizes the cost of transmitting data to the cloud by allowing you to author AWS Lambda functions that execute locally\",\n loadSpec: \"aws/greengrass\"\n }, {\n name: \"greengrassv2\",\n description: \"AWS IoT Greengrass brings local compute, messaging, data management, sync, and ML inference capabilities to edge devices. This enables devices to collect and analyze data closer to the source of information, react autonomously to local events, and communicate securely with each other on local networks. Local devices can also communicate securely with AWS IoT Core and export IoT data to the AWS Cloud. AWS IoT Greengrass developers can use AWS Lambda functions and components to create and deploy applications to fleets of edge devices for local operation. AWS IoT Greengrass Version 2 provides a new major version of the AWS IoT Greengrass Core software, new APIs, and a new console. Use this API reference to learn how to use the AWS IoT Greengrass V2 API operations to manage components, deployments, and core devices. For more information, see What is AWS IoT Greengrass? in the AWS IoT Greengrass V2 Developer Guide\",\n loadSpec: \"aws/greengrassv2\"\n }, {\n name: \"groundstation\",\n description: \"Welcome to the AWS Ground Station API Reference. AWS Ground Station is a fully managed service that enables you to control satellite communications, downlink and process satellite data, and scale your satellite operations efficiently and cost-effectively without having to build or manage your own ground station infrastructure\",\n loadSpec: \"aws/groundstation\"\n }, {\n name: \"guardduty\",\n description: \"Amazon GuardDuty is a continuous security monitoring service that analyzes and processes the following data sources: VPC Flow Logs, AWS CloudTrail event logs, and DNS logs. It uses threat intelligence feeds (such as lists of malicious IPs and domains) and machine learning to identify unexpected, potentially unauthorized, and malicious activity within your AWS environment. This can include issues like escalations of privileges, uses of exposed credentials, or communication with malicious IPs, URLs, or domains. For example, GuardDuty can detect compromised EC2 instances that serve malware or mine bitcoin. GuardDuty also monitors AWS account access behavior for signs of compromise. Some examples of this are unauthorized infrastructure deployments such as EC2 instances deployed in a Region that has never been used, or unusual API calls like a password policy change to reduce password strength. GuardDuty informs you of the status of your AWS environment by producing security findings that you can view in the GuardDuty console or through Amazon CloudWatch events. For more information, see the Amazon GuardDuty User Guide\",\n loadSpec: \"aws/guardduty\"\n }, {\n name: \"health\",\n description: \"AWS Health The AWS Health API provides programmatic access to the AWS Health information that appears in the AWS Personal Health Dashboard. You can use the API operations to get information about AWS Health events that affect your AWS services and resources. You must have a Business or Enterprise support plan from AWS Support to use the AWS Health API. If you call the AWS Health API from an AWS account that doesn't have a Business or Enterprise support plan, you receive a SubscriptionRequiredException error. AWS Health has a single endpoint: health.us-east-1.amazonaws.com (HTTPS). Use this endpoint to call the AWS Health API operations. 
For authentication of requests, AWS Health uses the Signature Version 4 Signing Process. If your AWS account is part of AWS Organizations, you can use the AWS Health organizational view feature. This feature provides a centralized view of AWS Health events across all accounts in your organization. You can aggregate AWS Health events in real time to identify accounts in your organization that are affected by an operational event or get notified of security vulnerabilities. Use the organizational view API operations to enable this feature and return event information. For more information, see Aggregating AWS Health events in the AWS Health User Guide. When you use the AWS Health API operations to return AWS Health events, see the following recommendations: Use the eventScopeCode parameter to specify whether to return AWS Health events that are public or account-specific. Use pagination to view all events from the response. For example, if you call the DescribeEventsForOrganization operation to get all events in your organization, you might receive several page results. Specify the nextToken in the next request to return more results\",\n loadSpec: \"aws/health\"\n }, {\n name: \"healthlake\",\n description: \"Amazon HealthLake is a HIPAA eligible service that allows customers to store, transform, query, and analyze their FHIR-formatted data in a consistent fashion in the cloud\",\n loadSpec: \"aws/healthlake\"\n }, {\n name: \"help\",\n description: \"Displays aws usage information\"\n }, {\n name: \"honeycode\",\n description: \"Amazon Honeycode is a fully managed service that allows you to quickly build mobile and web apps for teams\\u2014without programming. Build Honeycode apps for managing almost anything, like projects, customers, operations, approvals, resources, and even your team\",\n loadSpec: \"aws/honeycode\"\n }, {\n name: \"iam\",\n description: \"AWS Identity and Access Management AWS Identity and Access Management (IAM) is a web service for securely controlling access to AWS services. With IAM, you can centrally manage users, security credentials such as access keys, and permissions that control which AWS resources users and applications can access. For more information about IAM, see AWS Identity and Access Management (IAM) and the AWS Identity and Access Management User Guide\",\n loadSpec: \"aws/iam\"\n }, {\n name: \"identitystore\",\n description: \"\",\n loadSpec: \"aws/identitystore\"\n }, {\n name: \"imagebuilder\",\n description: 'EC2 Image Builder is a fully managed AWS service that makes it easier to automate the creation, management, and deployment of customized, secure, and up-to-date \"golden\" server images that are pre-installed and pre-configured with software and settings to meet specific IT standards',\n loadSpec: \"aws/imagebuilder\"\n }, {\n name: \"importexport\",\n description: \"AWS Import/Export Service AWS Import/Export accelerates transferring large amounts of data between the AWS cloud and portable storage devices that you mail to us. AWS Import/Export transfers data directly onto and off of your storage devices using Amazon's high-speed internal network and bypassing the Internet. For large data sets, AWS Import/Export is often faster than Internet transfer and more cost effective than upgrading your connectivity\",\n loadSpec: \"aws/importexport\"\n }, {\n name: \"inspector\",\n description: \"Amazon Inspector Amazon Inspector enables you to analyze the behavior of your AWS resources and to identify potential security issues. 
For more information, see Amazon Inspector User Guide\",\n loadSpec: \"aws/inspector\"\n }, {\n name: \"iot\",\n description: \"AWS IoT AWS IoT provides secure, bi-directional communication between Internet-connected devices (such as sensors, actuators, embedded devices, or smart appliances) and the AWS cloud. You can discover your custom IoT-Data endpoint to communicate with, configure rules for data processing and integration with other services, organize resources associated with each device (Registry), configure logging, and create and manage policies and credentials to authenticate devices. The service endpoints that expose this API are listed in AWS IoT Core Endpoints and Quotas. You must use the endpoint for the region that has the resources you want to access. The service name used by AWS Signature Version 4 to sign the request is: execute-api. For more information about how AWS IoT works, see the Developer Guide. For information about how to use the credentials provider for AWS IoT, see Authorizing Direct Calls to AWS Services\",\n loadSpec: \"aws/iot\"\n }, {\n name: \"iot-data\",\n description: \"AWS IoT AWS IoT-Data enables secure, bi-directional communication between Internet-connected things (such as sensors, actuators, embedded devices, or smart appliances) and the AWS cloud. It implements a broker for applications and things to publish messages over HTTP (Publish) and retrieve, update, and delete shadows. A shadow is a persistent representation of your things and their state in the AWS cloud. Find the endpoint address for actions in the AWS IoT data plane by running this CLI command: aws iot describe-endpoint --endpoint-type iot:Data-ATS The service name used by AWS Signature Version 4 to sign requests is: iotdevicegateway\",\n loadSpec: \"aws/iot-data\"\n }, {\n name: \"iot-jobs-data\",\n description: \"AWS IoT Jobs is a service that allows you to define a set of jobs \\u2014 remote operations that are sent to and executed on one or more devices connected to AWS IoT. For example, you can define a job that instructs a set of devices to download and install application or firmware updates, reboot, rotate certificates, or perform remote troubleshooting operations. To create a job, you make a job document which is a description of the remote operations to be performed, and you specify a list of targets that should perform the operations. The targets can be individual things, thing groups or both. AWS IoT Jobs sends a message to inform the targets that a job is available. The target starts the execution of the job by downloading the job document, performing the operations it specifies, and reporting its progress to AWS IoT. The Jobs service provides commands to track the progress of a job on a specific target and for all the targets of the job\",\n loadSpec: \"aws/iot-jobs-data\"\n }, {\n name: \"iot1click-devices\",\n description: \"Describes all of the AWS IoT 1-Click device-related API operations for the service.\\n Also provides sample requests, responses, and errors for the supported web services\\n protocols\",\n loadSpec: \"aws/iot1click-devices\"\n }, {\n name: \"iot1click-projects\",\n description: \"The AWS IoT 1-Click Projects API Reference\",\n loadSpec: \"aws/iot1click-projects\"\n }, {\n name: \"iotanalytics\",\n description: \"AWS IoT Analytics allows you to collect large amounts of device data, process messages, and store them. You can then query the data and run sophisticated analytics on it. 
AWS IoT Analytics enables advanced data exploration through integration with Jupyter Notebooks and data visualization through integration with Amazon QuickSight. Traditional analytics and business intelligence tools are designed to process structured data. IoT data often comes from devices that record noisy processes (such as temperature, motion, or sound). As a result, the data from these devices can have significant gaps, corrupted messages, and false readings that must be cleaned up before analysis can occur. Also, IoT data is often only meaningful in the context of other data from external sources. AWS IoT Analytics automates the steps required to analyze data from IoT devices. AWS IoT Analytics filters, transforms, and enriches IoT data before storing it in a time-series data store for analysis. You can set up the service to collect only the data you need from your devices, apply mathematical transforms to process the data, and enrich the data with device-specific metadata such as device type and location before storing it. Then, you can analyze your data by running queries using the built-in SQL query engine, or perform more complex analytics and machine learning inference. AWS IoT Analytics includes pre-built models for common IoT use cases so you can answer questions like which devices are about to fail or which customers are at risk of abandoning their wearable devices\",\n loadSpec: \"aws/iotanalytics\"\n }, {\n name: \"iotdeviceadvisor\",\n description: \"AWS IoT Core Device Advisor is a cloud-based, fully managed test capability for validating IoT devices during device software development. Device Advisor provides pre-built tests that you can use to validate IoT devices for reliable and secure connectivity with AWS IoT Core before deploying devices to production. By using Device Advisor, you can confirm that your devices can connect to AWS IoT Core, follow security best practices and, if applicable, receive software updates from IoT Device Management. You can also download signed qualification reports to submit to the AWS Partner Network to get your device qualified for the AWS Partner Device Catalog without the need to send your device in and wait for it to be tested\",\n loadSpec: \"aws/iotdeviceadvisor\"\n }, {\n name: \"iotevents\",\n description: \"AWS IoT Events monitors your equipment or device fleets for failures or changes in operation, and triggers actions when such events occur. You can use AWS IoT Events API operations to create, read, update, and delete inputs and detector models, and to list their versions\",\n loadSpec: \"aws/iotevents\"\n }, {\n name: \"iotevents-data\",\n description: \"AWS IoT Events monitors your equipment or device fleets for failures or changes in operation, and triggers actions when such events occur. AWS IoT Events Data API commands enable you to send inputs to detectors, list detectors, and view or update a detector's status\",\n loadSpec: \"aws/iotevents-data\"\n }, {\n name: \"iotfleethub\",\n description: \"With Fleet Hub for AWS IoT Device Management you can build stand-alone web applications for monitoring the health of your device fleets. Fleet Hub for AWS IoT Device Management is in public preview and is subject to change\",\n loadSpec: \"aws/iotfleethub\"\n }, {\n name: \"iotsecuretunneling\",\n description: \"AWS IoT Secure Tunneling AWS IoT Secure Tunneling enables you to create remote connections to devices deployed in the field. 
For more information about how AWS IoT Secure Tunneling works, see AWS IoT Secure Tunneling\",\n loadSpec: \"aws/iotsecuretunneling\"\n }, {\n name: \"iotsitewise\",\n description: \"Welcome to the AWS IoT SiteWise API Reference. AWS IoT SiteWise is an AWS service that connects Industrial Internet of Things (IIoT) devices to the power of the AWS Cloud. For more information, see the AWS IoT SiteWise User Guide. For information about AWS IoT SiteWise quotas, see Quotas in the AWS IoT SiteWise User Guide\",\n loadSpec: \"aws/iotsitewise\"\n }, {\n name: \"iotthingsgraph\",\n description: \"AWS IoT Things Graph AWS IoT Things Graph provides an integrated set of tools that enable developers to connect devices and services that use different standards, such as units of measure and communication protocols. AWS IoT Things Graph makes it possible to build IoT applications with little to no code by connecting devices and services and defining how they interact at an abstract level. For more information about how AWS IoT Things Graph works, see the User Guide\",\n loadSpec: \"aws/iotthingsgraph\"\n }, {\n name: \"iotwireless\",\n description: \"AWS IoT Wireless API documentation\",\n loadSpec: \"aws/iotwireless\"\n }, {\n name: \"ivs\",\n description: \"Introduction The Amazon Interactive Video Service (IVS) API is REST compatible, using a standard HTTP API and an AWS EventBridge event stream for responses. JSON is used for both requests and responses, including errors. The API is an AWS regional service, currently in these regions: us-west-2, us-east-1, and eu-west-1. All API request parameters and URLs are case sensitive. For a summary of notable documentation changes in each release, see Document History. Service Endpoints The following are the Amazon IVS service endpoints (all HTTPS): Region name: US West (Oregon) Region: us-west-2 Endpoint: ivs.us-west-2.amazonaws.com Region name: US East (Virginia) Region: us-east-1 Endpoint: ivs.us-east-1.amazonaws.com Region name: EU West (Dublin) Region: eu-west-1 Endpoint: ivs.eu-west-1.amazonaws.com Allowed Header Values Accept: application/json Accept-Encoding: gzip, deflate Content-Type: application/json Resources The following resources contain information about your IVS live stream (see Getting Started with Amazon IVS): Channel \\u2014 Stores configuration data related to your live stream. You first create a channel and then use the channel\\u2019s stream key to start your live stream. See the Channel endpoints for more information. Stream key \\u2014 An identifier assigned by Amazon IVS when you create a channel, which is then used to authorize streaming. See the StreamKey endpoints for more information. Treat the stream key like a secret, since it allows anyone to stream to the channel. Playback key pair \\u2014 Video playback may be restricted using playback-authorization tokens, which use public-key encryption. A playback key pair is the public-private pair of keys used to sign and validate the playback-authorization token. See the PlaybackKeyPair endpoints for more information. Recording configuration \\u2014 Stores configuration related to recording a live stream and where to store the recorded content. Multiple channels can reference the same recording configuration. See the Recording Configuration endpoints for more information. Tagging A tag is a metadata label that you assign to an AWS resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. 
See Tagging AWS Resources for more information, including restrictions that apply to tags. Tags can help you identify and organize your AWS resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags). The Amazon IVS API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following resources support tagging: Channels, Stream Keys, Playback Key Pairs, and Recording Configurations. Authentication versus Authorization Note the differences between these concepts: Authentication is about verifying identity. You need to be authenticated to sign Amazon IVS API requests. Authorization is about granting permissions. You need to be authorized to view Amazon IVS private channels. (Private channels are channels that are enabled for \\\"playback authorization.\\\") Authentication All Amazon IVS API requests must be authenticated with a signature. The AWS Command-Line Interface (CLI) and Amazon IVS Player SDKs take care of signing the underlying API calls for you. However, if your application calls the Amazon IVS API directly, it\\u2019s your responsibility to sign the requests. You generate a signature using valid AWS credentials that have permission to perform the requested action. For example, you must sign PutMetadata requests with a signature generated from an IAM user account that has the ivs:PutMetadata permission. For more information: Authentication and generating signatures \\u2014 See Authenticating Requests (AWS Signature Version 4) in the AWS General Reference. Managing Amazon IVS permissions \\u2014 See Identity and Access Management on the Security page of the Amazon IVS User Guide. Channel Endpoints CreateChannel \\u2014 Creates a new channel and an associated stream key to start streaming. GetChannel \\u2014 Gets the channel configuration for the specified channel ARN (Amazon Resource Name). BatchGetChannel \\u2014 Performs GetChannel on multiple ARNs simultaneously. ListChannels \\u2014 Gets summary information about all channels in your account, in the AWS region where the API request is processed. This list can be filtered to match a specified name or recording-configuration ARN. Filters are mutually exclusive and cannot be used together. If you try to use both filters, you will get an error (409 Conflict Exception). UpdateChannel \\u2014 Updates a channel's configuration. This does not affect an ongoing stream of this channel. You must stop and restart the stream for the changes to take effect. DeleteChannel \\u2014 Deletes the specified channel. StreamKey Endpoints CreateStreamKey \\u2014 Creates a stream key, used to initiate a stream, for the specified channel ARN. GetStreamKey \\u2014 Gets stream key information for the specified ARN. BatchGetStreamKey \\u2014 Performs GetStreamKey on multiple ARNs simultaneously. ListStreamKeys \\u2014 Gets summary information about stream keys for the specified channel. DeleteStreamKey \\u2014 Deletes the stream key for the specified ARN, so it can no longer be used to stream. Stream Endpoints GetStream \\u2014 Gets information about the active (live) stream on a specified channel. ListStreams \\u2014 Gets summary information about live streams in your account, in the AWS region where the API request is processed. StopStream \\u2014 Disconnects the incoming RTMPS stream for the specified channel. Can be used in conjunction with DeleteStreamKey to prevent further streaming to a channel. 
PutMetadata \\u2014 Inserts metadata into the active stream of the specified channel. A maximum of 5 requests per second per channel is allowed, each with a maximum 1 KB payload. (If 5 TPS is not sufficient for your needs, we recommend batching your data into a single PutMetadata call.) PlaybackKeyPair Endpoints For more information, see Setting Up Private Channels in the Amazon IVS User Guide. ImportPlaybackKeyPair \\u2014 Imports the public portion of a new key pair and returns its arn and fingerprint. The privateKey can then be used to generate viewer authorization tokens, to grant viewers access to private channels (channels enabled for playback authorization). GetPlaybackKeyPair \\u2014 Gets a specified playback authorization key pair and returns the arn and fingerprint. The privateKey held by the caller can be used to generate viewer authorization tokens, to grant viewers access to private channels. ListPlaybackKeyPairs \\u2014 Gets summary information about playback key pairs. DeletePlaybackKeyPair \\u2014 Deletes a specified authorization key pair. This invalidates future viewer tokens generated using the key pair\\u2019s privateKey. RecordingConfiguration Endpoints CreateRecordingConfiguration \\u2014 Creates a new recording configuration, used to enable recording to Amazon S3. GetRecordingConfiguration \\u2014 Gets the recording-configuration metadata for the specified ARN. ListRecordingConfigurations \\u2014 Gets summary information about all recording configurations in your account, in the AWS region where the API request is processed. DeleteRecordingConfiguration \\u2014 Deletes the recording configuration for the specified ARN. AWS Tags Endpoints TagResource \\u2014 Adds or updates tags for the AWS resource with the specified ARN. UntagResource \\u2014 Removes tags from the resource with the specified ARN. ListTagsForResource \\u2014 Gets information about AWS tags for the specified ARN\",\n loadSpec: \"aws/ivs\"\n }, {\n name: \"kafka\",\n description: \"The operations for managing an Amazon MSK cluster\",\n loadSpec: \"aws/kafka\"\n }, {\n name: \"kendra\",\n description: \"Amazon Kendra is a service for indexing large document sets\",\n loadSpec: \"aws/kendra\"\n }, {\n name: \"kinesis\",\n description: \"Amazon Kinesis Data Streams Service API Reference Amazon Kinesis Data Streams is a managed service that scales elastically for real-time processing of streaming big data\",\n loadSpec: \"aws/kinesis\"\n }, {\n name: \"kinesis-video-archived-media\",\n description: \"\",\n loadSpec: \"aws/kinesis-video-archived-media\"\n }, {\n name: \"kinesis-video-media\",\n description: \"\",\n loadSpec: \"aws/kinesis-video-media\"\n }, {\n name: \"kinesis-video-signaling\",\n description: \"Kinesis Video Streams Signaling Service is an intermediate service that establishes a communication channel for discovering peers, transmitting offers and answers in order to establish a peer-to-peer connection in WebRTC technology\",\n loadSpec: \"aws/kinesis-video-signaling\"\n }, {\n name: \"kinesisanalytics\",\n description: \"Amazon Kinesis Analytics Overview This documentation is for version 1 of the Amazon Kinesis Data Analytics API, which only supports SQL applications. Version 2 of the API supports SQL and Java applications. For more information about version 2, see Amazon Kinesis Data Analytics API V2 Documentation. This is the Amazon Kinesis Analytics v1 API Reference. 
The Amazon Kinesis Analytics Developer Guide provides additional information\",\n loadSpec: \"aws/kinesisanalytics\"\n }, {\n name: \"kinesisanalyticsv2\",\n description: \"Amazon Kinesis Data Analytics is a fully managed service that you can use to process and analyze streaming data using Java, SQL, or Scala. The service enables you to quickly author and run Java, SQL, or Scala code against streaming sources to perform time series analytics, feed real-time dashboards, and create real-time metrics\",\n loadSpec: \"aws/kinesisanalyticsv2\"\n }, {\n name: \"kinesisvideo\",\n description: \"\",\n loadSpec: \"aws/kinesisvideo\"\n }, {\n name: \"kms\",\n description: \"AWS Key Management Service AWS Key Management Service (AWS KMS) is an encryption and key management web service. This guide describes the AWS KMS operations that you can call programmatically. For general information about AWS KMS, see the AWS Key Management Service Developer Guide . AWS provides SDKs that consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .Net, macOS, Android, etc.). The SDKs provide a convenient way to create programmatic access to AWS KMS and other AWS services. For example, the SDKs take care of tasks such as signing requests (see below), managing errors, and retrying requests automatically. For more information about the AWS SDKs, including how to download and install them, see Tools for Amazon Web Services. We recommend that you use the AWS SDKs to make programmatic API calls to AWS KMS. Clients must support TLS (Transport Layer Security) 1.0. We recommend TLS 1.2. Clients must also support cipher suites with Perfect Forward Secrecy (PFS) such as Ephemeral Diffie-Hellman (DHE) or Elliptic Curve Ephemeral Diffie-Hellman (ECDHE). Most modern systems such as Java 7 and later support these modes. Signing Requests Requests must be signed by using an access key ID and a secret access key. We strongly recommend that you do not use your AWS account (root) access key ID and secret key for everyday work with AWS KMS. Instead, use the access key ID and secret access key for an IAM user. You can also use the AWS Security Token Service to generate temporary security credentials that you can use to sign requests. All AWS KMS operations require Signature Version 4. Logging API Requests AWS KMS supports AWS CloudTrail, a service that logs AWS API calls and related events for your AWS account and delivers them to an Amazon S3 bucket that you specify. By using the information collected by CloudTrail, you can determine what requests were made to AWS KMS, who made the request, when it was made, and so on. To learn more about CloudTrail, including how to turn it on and find your log files, see the AWS CloudTrail User Guide. Additional Resources For more information about credentials and request signing, see the following: AWS Security Credentials - This topic provides general information about the types of credentials used for accessing AWS. Temporary Security Credentials - This section of the IAM User Guide describes how to create and use temporary security credentials. Signature Version 4 Signing Process - This set of topics walks you through the process of signing a request using an access key ID and a secret access key. Commonly Used API Operations Of the API operations discussed in this guide, the following will prove the most useful for most applications. You will likely perform operations other than these, such as creating keys and assigning policies, by using the console. 
Encrypt Decrypt GenerateDataKey GenerateDataKeyWithoutPlaintext\",\n loadSpec: \"aws/kms\"\n }, {\n name: \"lakeformation\",\n description: \"AWS Lake Formation Defines the public endpoint for the AWS Lake Formation service\",\n loadSpec: \"aws/lakeformation\"\n }, {\n name: \"lambda\",\n description: \"AWS Lambda Overview This is the AWS Lambda API Reference. The AWS Lambda Developer Guide provides additional information. For the service overview, see What is AWS Lambda, and for information about how the service works, see AWS Lambda: How it Works in the AWS Lambda Developer Guide\",\n loadSpec: \"aws/lambda\"\n }, {\n name: \"lex-models\",\n description: \"Amazon Lex Build-Time Actions Amazon Lex is an AWS service for building conversational voice and text interfaces. Use these actions to create, update, and delete conversational bots for new and existing client applications\",\n loadSpec: \"aws/lex-models\"\n }, {\n name: \"lex-runtime\",\n description: 'Amazon Lex provides both build and runtime endpoints. Each endpoint provides a set of operations (API). Your conversational bot uses the runtime API to understand user utterances (user input text or voice). For example, suppose a user says "I want pizza". Your bot sends this input to Amazon Lex using the runtime API. Amazon Lex recognizes that the user request is for the OrderPizza intent (one of the intents defined in the bot). Then Amazon Lex engages in user conversation on behalf of the bot to elicit required information (slot values, such as pizza size and crust type), and then performs fulfillment activity (that you configured when you created the bot). You use the build-time API to create and manage your Amazon Lex bot. For a list of build-time operations, see the build-time API,',\n loadSpec: \"aws/lex-runtime\"\n }, {\n name: \"lexv2-models\",\n description: \"\",\n loadSpec: \"aws/lexv2-models\"\n }, {\n name: \"lexv2-runtime\",\n description: \"\",\n loadSpec: \"aws/lexv2-runtime\"\n }, {\n name: \"license-manager\",\n description: \"AWS License Manager AWS License Manager makes it easier to manage licenses from software vendors across multiple AWS accounts and on-premises servers\",\n loadSpec: \"aws/license-manager\"\n }, {\n name: \"lightsail\",\n description: \"Amazon Lightsail is the easiest way to get started with Amazon Web Services (AWS) for developers who need to build websites or web applications. It includes everything you need to launch your project quickly - instances (virtual private servers), container services, managed databases, SSD-based block storage, static IP addresses, load balancers, content delivery network (CDN) distributions, DNS management of registered domains, and resource snapshots (backups) - for a low, predictable monthly price. You can manage your Lightsail resources using the Lightsail console, Lightsail API, AWS Command Line Interface (AWS CLI), or SDKs. For more information about Lightsail concepts and tasks, see the Lightsail Dev Guide. This API Reference provides detailed information about the actions, data types, parameters, and errors of the Lightsail service. 
For more information about the supported AWS Regions, endpoints, and service quotas of the Lightsail service, see Amazon Lightsail Endpoints and Quotas in the AWS General Reference\",\n loadSpec: \"aws/lightsail\"\n }, {\n name: \"location\",\n description: \"Suite of geospatial services including Maps, Places, Tracking, and Geofencing\",\n loadSpec: \"aws/location\"\n }, {\n name: \"logs\",\n description: 'You can use Amazon CloudWatch Logs to monitor, store, and access your log files from EC2 instances, AWS CloudTrail, or other sources. You can then retrieve the associated log data from CloudWatch Logs using the CloudWatch console, CloudWatch Logs commands in the AWS CLI, CloudWatch Logs API, or CloudWatch Logs SDK. You can use CloudWatch Logs to: Monitor logs from EC2 instances in real-time: You can use CloudWatch Logs to monitor applications and systems using log data. For example, CloudWatch Logs can track the number of errors that occur in your application logs and send you a notification whenever the rate of errors exceeds a threshold that you specify. CloudWatch Logs uses your log data for monitoring so no code changes are required. For example, you can monitor application logs for specific literal terms (such as \"NullReferenceException\") or count the number of occurrences of a literal term at a particular position in log data (such as \"404\" status codes in an Apache access log). When the term you are searching for is found, CloudWatch Logs reports the data to a CloudWatch metric that you specify. Monitor AWS CloudTrail logged events: You can create alarms in CloudWatch and receive notifications of particular API activity as captured by CloudTrail. You can use the notification to perform troubleshooting. Archive log data: You can use CloudWatch Logs to store your log data in highly durable storage. You can change the log retention setting so that any log events older than this setting are automatically deleted. The CloudWatch Logs agent makes it easy to quickly send both rotated and non-rotated log data off of a host and into the log service. You can then access the raw log data when you need it',\n loadSpec: \"aws/logs\"\n }, {\n name: \"lookoutequipment\",\n description: \"Amazon Lookout for Equipment is a machine learning service that uses advanced analytics to identify anomalies in machines from sensor data for use in predictive maintenance\",\n loadSpec: \"aws/lookoutequipment\"\n }, {\n name: \"lookoutmetrics\",\n description: \"This is the Amazon Lookout for Metrics API Reference. For an introduction to the service with tutorials for getting started, visit Amazon Lookout for Metrics Developer Guide\",\n loadSpec: \"aws/lookoutmetrics\"\n }, {\n name: \"lookoutvision\",\n description: \"This is the Amazon Lookout for Vision API Reference. It provides descriptions of actions, data types, common parameters, and common errors. Amazon Lookout for Vision enables you to find visual defects in industrial products, accurately and at scale. 
It uses computer vision to identify missing components in an industrial product, damage to vehicles or structures, irregularities in production lines, and even minuscule defects in silicon wafers \\u2014 or any other physical item where quality is important such as a missing capacitor on printed circuit boards\",\n loadSpec: \"aws/lookoutvision\"\n }, {\n name: \"machinelearning\",\n description: \"Definition of the public APIs exposed by Amazon Machine Learning\",\n loadSpec: \"aws/machinelearning\"\n }, {\n name: \"macie\",\n description: \"Amazon Macie Classic Amazon Macie Classic is a security service that uses machine learning to automatically discover, classify, and protect sensitive data in AWS. Macie Classic recognizes sensitive data such as personally identifiable information (PII) or intellectual property, and provides you with dashboards and alerts that give visibility into how this data is being accessed or moved. For more information, see the Amazon Macie Classic User Guide\",\n loadSpec: \"aws/macie\"\n }, {\n name: \"macie2\",\n description: \"Amazon Macie is a fully managed data security and data privacy service that uses machine learning and pattern matching to discover and protect your sensitive data in AWS. Macie automates the discovery of sensitive data, such as PII and intellectual property, to provide you with insight into the data that your organization stores in AWS. Macie also provides an inventory of your Amazon S3 buckets, which it continually monitors for you. If Macie detects sensitive data or potential data access issues, it generates detailed findings for you to review and act upon as necessary\",\n loadSpec: \"aws/macie2\"\n }, {\n name: \"managedblockchain\",\n description: \"Amazon Managed Blockchain is a fully managed service for creating and managing blockchain networks using open-source frameworks. Blockchain allows you to build applications where multiple parties can securely and transparently run transactions and share data without the need for a trusted, central authority. Managed Blockchain supports the Hyperledger Fabric and Ethereum open-source frameworks. Because of fundamental differences between the frameworks, some API actions or data types may only apply in the context of one framework and not the other. For example, actions related to Hyperledger Fabric network members such as CreateMember and DeleteMember do not apply to Ethereum. The description for each action indicates the framework or frameworks to which it applies. Data types and properties that apply only in the context of a particular framework are similarly indicated\",\n loadSpec: \"aws/managedblockchain\"\n }, {\n name: \"marketplace-catalog\",\n description: \"Catalog API actions allow you to manage your entities through list, describe, and update capabilities. An entity can be a product or an offer on AWS Marketplace. You can automate your entity update process by integrating the AWS Marketplace Catalog API with your AWS Marketplace product build or deployment pipelines. You can also create your own applications on top of the Catalog API to manage your products on AWS Marketplace\",\n loadSpec: \"aws/marketplace-catalog\"\n }, {\n name: \"marketplace-entitlement\",\n description: \"AWS Marketplace Entitlement Service This reference provides descriptions of the AWS Marketplace Entitlement Service API. AWS Marketplace Entitlement Service is used to determine the entitlement of a customer to a given product. An entitlement represents capacity in a product owned by the customer. 
For example, a customer might own some number of users or seats in an SaaS application or some amount of data capacity in a multi-tenant database. Getting Entitlement Records GetEntitlements- Gets the entitlements for a Marketplace product\",\n loadSpec: \"aws/marketplace-entitlement\"\n }, {\n name: \"marketplacecommerceanalytics\",\n description: \"Provides AWS Marketplace business intelligence data on-demand\",\n loadSpec: \"aws/marketplacecommerceanalytics\"\n }, {\n name: \"mediaconnect\",\n description: \"API for AWS Elemental MediaConnect\",\n loadSpec: \"aws/mediaconnect\"\n }, {\n name: \"mediaconvert\",\n description: \"AWS Elemental MediaConvert\",\n loadSpec: \"aws/mediaconvert\"\n }, {\n name: \"medialive\",\n description: \"API for AWS Elemental MediaLive\",\n loadSpec: \"aws/medialive\"\n }, {\n name: \"mediapackage\",\n description: \"AWS Elemental MediaPackage\",\n loadSpec: \"aws/mediapackage\"\n }, {\n name: \"mediapackage-vod\",\n description: \"AWS Elemental MediaPackage VOD\",\n loadSpec: \"aws/mediapackage-vod\"\n }, {\n name: \"mediastore\",\n description: \"An AWS Elemental MediaStore container is a namespace that holds folders and objects. You use a container endpoint to create, read, and delete objects\",\n loadSpec: \"aws/mediastore\"\n }, {\n name: \"mediastore-data\",\n description: \"An AWS Elemental MediaStore asset is an object, similar to an object in the Amazon S3 service. Objects are the fundamental entities that are stored in AWS Elemental MediaStore\",\n loadSpec: \"aws/mediastore-data\"\n }, {\n name: \"mediatailor\",\n description: \"Use the AWS Elemental MediaTailor SDKs and CLI to configure scalable ad insertion and linear channels. With MediaTailor, you can assemble existing content into a linear stream and serve targeted ads to viewers while maintaining broadcast quality in over-the-top (OTT) video applications. For information about using the service, including detailed information about the settings covered in this guide, see the AWS Elemental MediaTailor User Guide. Through the SDKs and the CLI you manage AWS Elemental MediaTailor configurations and channels the same as you do through the console. For example, you specify ad insertion behavior and mapping information for the origin server and the ad decision server (ADS)\",\n loadSpec: \"aws/mediatailor\"\n }, {\n name: \"meteringmarketplace\",\n description: \"AWS Marketplace Metering Service This reference provides descriptions of the low-level AWS Marketplace Metering Service API. AWS Marketplace sellers can use this API to submit usage data for custom usage dimensions. For information on the permissions you need to use this API, see AWS Marketplace metering and entitlement API permissions in the AWS Marketplace Seller Guide. Submitting Metering Records MeterUsage- Submits the metering record for a Marketplace product. MeterUsage is called from an EC2 instance or a container running on EKS or ECS. BatchMeterUsage- Submits the metering record for a set of customers. BatchMeterUsage is called from a software-as-a-service (SaaS) application. Accepting New Customers ResolveCustomer- Called by a SaaS application during the registration process. When a buyer visits your website during the registration process, the buyer submits a Registration Token through the browser. The Registration Token is resolved through this API to obtain a CustomerIdentifier and Product Code. 
Entitlement and Metering for Paid Container Products Paid container software products sold through AWS Marketplace must integrate with the AWS Marketplace Metering Service and call the RegisterUsage operation for software entitlement and metering. Free and BYOL products for Amazon ECS or Amazon EKS aren't required to call RegisterUsage, but you can do so if you want to receive usage data in your seller reports. For more information on using the RegisterUsage operation, see Container-Based Products. BatchMeterUsage API calls are captured by AWS CloudTrail. You can use CloudTrail to verify that the SaaS metering records that you sent are accurate by searching for records with the eventName of BatchMeterUsage. You can also use CloudTrail to audit records over time. For more information, see the AWS CloudTrail User Guide\",\n loadSpec: \"aws/meteringmarketplace\"\n }, {\n name: \"mgh\",\n description: \"The AWS Migration Hub API methods help to obtain server and application migration status and integrate your resource-specific migration tool by providing a programmatic interface to Migration Hub. Remember that you must set your AWS Migration Hub home region before you call any of these APIs, or a HomeRegionNotSetException error will be returned. Also, you must make the API calls while in your home region\",\n loadSpec: \"aws/mgh\"\n }, {\n name: \"mgn\",\n description: \"The Application Migration Service service\",\n loadSpec: \"aws/mgn\"\n }, {\n name: \"migrationhub-config\",\n description: \"The AWS Migration Hub home region APIs are available specifically for working with your Migration Hub home region. You can use these APIs to determine a home region, as well as to create and work with controls that describe the home region. You must make API calls for write actions (create, notify, associate, disassociate, import, or put) while in your home region, or a HomeRegionNotSetException error is returned. API calls for read actions (list, describe, stop, and delete) are permitted outside of your home region. If you call a write API outside the home region, an InvalidInputException is returned. You can call the GetHomeRegion action to obtain the account's Migration Hub home region. For specific API usage, see the sections that follow in this AWS Migration Hub Home Region API reference\",\n loadSpec: \"aws/migrationhub-config\"\n }, {\n name: \"mobile\",\n description: \"AWS Mobile Service provides mobile app and website developers with capabilities required to configure AWS resources and bootstrap their developer desktop projects with the necessary SDKs, constants, tools and samples to make use of those resources\",\n loadSpec: \"aws/mobile\"\n }, {\n name: \"mq\",\n description: \"Amazon MQ is a managed message broker service for Apache ActiveMQ and RabbitMQ that makes it easy to set up and operate message brokers in the cloud. A message broker allows software applications and components to communicate using various programming languages, operating systems, and formal messaging protocols\",\n loadSpec: \"aws/mq\"\n }, {\n name: \"mturk\",\n description: \"Amazon Mechanical Turk API Reference\",\n loadSpec: \"aws/mturk\"\n }, {\n name: \"mwaa\",\n description: \"Amazon Managed Workflows for Apache Airflow This section contains the Amazon Managed Workflows for Apache Airflow (MWAA) API reference documentation. 
For more information, see What Is Amazon MWAA?\",\n loadSpec: \"aws/mwaa\"\n }, {\n name: \"neptune\",\n description: \"Amazon Neptune Amazon Neptune is a fast, reliable, fully-managed graph database service that makes it easy to build and run applications that work with highly connected datasets. The core of Amazon Neptune is a purpose-built, high-performance graph database engine optimized for storing billions of relationships and querying the graph with millisecond latency. Amazon Neptune supports the popular graph models Property Graph and W3C's RDF, and their respective query languages Apache TinkerPop Gremlin and SPARQL, allowing you to easily build queries that efficiently navigate highly connected datasets. Neptune powers graph use cases such as recommendation engines, fraud detection, knowledge graphs, drug discovery, and network security. This interface reference for Amazon Neptune contains documentation for a programming or command line interface you can use to manage Amazon Neptune. Note that Amazon Neptune is asynchronous, which means that some interfaces might require techniques such as polling or callback functions to determine when a command has been applied. In this reference, the parameter descriptions indicate whether a command is applied immediately, on the next instance reboot, or during the maintenance window. The reference structure is as follows, and we list below some related topics from the user guide\",\n loadSpec: \"aws/neptune\"\n }, {\n name: \"network-firewall\",\n description: \"This is the API Reference for AWS Network Firewall. This guide is for developers who need detailed information about the Network Firewall API actions, data types, and errors. The REST API requires you to handle connection details, such as calculating signatures, handling request retries, and error handling. For general information about using the AWS REST APIs, see AWS APIs. To access Network Firewall using the REST API endpoint: https://network-firewall..amazonaws.com Alternatively, you can use one of the AWS SDKs to access an API that's tailored to the programming language or platform that you're using. For more information, see AWS SDKs. For descriptions of Network Firewall features, including step-by-step instructions on how to use them through the Network Firewall console, see the Network Firewall Developer Guide. Network Firewall is a stateful, managed network firewall and intrusion detection and prevention service for Amazon Virtual Private Cloud (Amazon VPC). With Network Firewall, you can filter traffic at the perimeter of your VPC. This includes filtering traffic going to and coming from an internet gateway, NAT gateway, or over VPN or AWS Direct Connect. Network Firewall uses rules that are compatible with Suricata, a free, open source intrusion detection system (IDS) engine. For information about Suricata, see the Suricata website. You can use Network Firewall to monitor and protect your VPC traffic in a number of ways. The following are just a few examples: Allow domains or IP addresses for known AWS service endpoints, such as Amazon S3, and block all other forms of traffic. Use custom lists of known bad domains to limit the types of domain names that your applications can access. Perform deep packet inspection on traffic entering or leaving your VPC. Use stateful protocol detection to filter protocols like HTTPS, regardless of the port used. To enable Network Firewall for your VPCs, you perform steps in both Amazon VPC and in Network Firewall. 
For information about using Amazon VPC, see Amazon VPC User Guide. To start using Network Firewall, do the following: (Optional) If you don't already have a VPC that you want to protect, create it in Amazon VPC. In Amazon VPC, in each Availability Zone where you want to have a firewall endpoint, create a subnet for the sole use of Network Firewall. In Network Firewall, create stateless and stateful rule groups, to define the components of the network traffic filtering behavior that you want your firewall to have. In Network Firewall, create a firewall policy that uses your rule groups and specifies additional default traffic filtering behavior. In Network Firewall, create a firewall and specify your new firewall policy and VPC subnets. Network Firewall creates a firewall endpoint in each subnet that you specify, with the behavior that's defined in the firewall policy. In Amazon VPC, use ingress routing enhancements to route traffic through the new firewall endpoints\",\n loadSpec: \"aws/network-firewall\"\n }, {\n name: \"networkmanager\",\n description: \"Transit Gateway Network Manager (Network Manager) enables you to create a global network, in which you can monitor your AWS and on-premises networks that are built around transit gateways. The Network Manager APIs are supported in the US West (Oregon) Region only. You must specify the us-west-2 Region in all requests made to Network Manager\",\n loadSpec: \"aws/networkmanager\"\n }, {\n name: \"opsworks\",\n description: \"AWS OpsWorks Welcome to the AWS OpsWorks Stacks API Reference. This guide provides descriptions, syntax, and usage examples for AWS OpsWorks Stacks actions and data types, including common parameters and error codes. AWS OpsWorks Stacks is an application management service that provides an integrated experience for overseeing the complete application lifecycle. For information about this product, go to the AWS OpsWorks details page. SDKs and CLI The most common way to use the AWS OpsWorks Stacks API is by using the AWS Command Line Interface (CLI) or by using one of the AWS SDKs to implement applications in your preferred language. For more information, see: AWS CLI AWS SDK for Java AWS SDK for .NET AWS SDK for PHP 2 AWS SDK for Ruby AWS SDK for Node.js AWS SDK for Python(Boto) Endpoints AWS OpsWorks Stacks supports the following endpoints, all HTTPS. You must connect to one of the following endpoints. Stacks can only be accessed or managed within the endpoint in which they are created. opsworks.us-east-1.amazonaws.com opsworks.us-east-2.amazonaws.com opsworks.us-west-1.amazonaws.com opsworks.us-west-2.amazonaws.com opsworks.ca-central-1.amazonaws.com (API only; not available in the AWS console) opsworks.eu-west-1.amazonaws.com opsworks.eu-west-2.amazonaws.com opsworks.eu-west-3.amazonaws.com opsworks.eu-central-1.amazonaws.com opsworks.ap-northeast-1.amazonaws.com opsworks.ap-northeast-2.amazonaws.com opsworks.ap-south-1.amazonaws.com opsworks.ap-southeast-1.amazonaws.com opsworks.ap-southeast-2.amazonaws.com opsworks.sa-east-1.amazonaws.com Chef Versions When you call CreateStack, CloneStack, or UpdateStack we recommend you use the ConfigurationManager parameter to specify the Chef version. The recommended and default value for Linux stacks is currently 12. Windows stacks use Chef 12.2. For more information, see Chef Versions. You can specify Chef 12, 11.10, or 11.4 for your Linux stack. 
We recommend migrating your existing Linux stacks to Chef 12 as soon as possible\",\n loadSpec: \"aws/opsworks\"\n }, {\n name: \"opsworkscm\",\n description: \"AWS OpsWorks CM AWS OpsWorks for configuration management (CM) is a service that runs and manages configuration management servers. You can use AWS OpsWorks CM to create and manage AWS OpsWorks for Chef Automate and AWS OpsWorks for Puppet Enterprise servers, and add or remove nodes for the servers to manage. Glossary of terms Server: A configuration management server that can be highly-available. The configuration management server runs on an Amazon Elastic Compute Cloud (EC2) instance, and may use various other AWS services, such as Amazon Relational Database Service (RDS) and Elastic Load Balancing. A server is a generic abstraction over the configuration manager that you want to use, much like Amazon RDS. In AWS OpsWorks CM, you do not start or stop servers. After you create servers, they continue to run until they are deleted. Engine: The engine is the specific configuration manager that you want to use. Valid values in this release include ChefAutomate and Puppet. Backup: This is an application-level backup of the data that the configuration manager stores. AWS OpsWorks CM creates an S3 bucket for backups when you launch the first server. A backup maintains a snapshot of a server's configuration-related attributes at the time the backup starts. Events: Events are always related to a server. Events are written during server creation, when health checks run, when backups are created, when system maintenance is performed, etc. When you delete a server, the server's events are also deleted. Account attributes: Every account has attributes that are assigned in the AWS OpsWorks CM database. These attributes store information about configuration limits (servers, backups, etc.) and your customer account. Endpoints AWS OpsWorks CM supports the following endpoints, all HTTPS. You must connect to one of the following endpoints. Your servers can only be accessed or managed within the endpoint in which they are created. opsworks-cm.us-east-1.amazonaws.com opsworks-cm.us-east-2.amazonaws.com opsworks-cm.us-west-1.amazonaws.com opsworks-cm.us-west-2.amazonaws.com opsworks-cm.ap-northeast-1.amazonaws.com opsworks-cm.ap-southeast-1.amazonaws.com opsworks-cm.ap-southeast-2.amazonaws.com opsworks-cm.eu-central-1.amazonaws.com opsworks-cm.eu-west-1.amazonaws.com For more information, see AWS OpsWorks endpoints and quotas in the AWS General Reference. Throttling limits All API operations allow for five requests per second with a burst of 10 requests per second\",\n loadSpec: \"aws/opsworkscm\"\n }, {\n name: \"organizations\",\n description: \"AWS Organizations\",\n loadSpec: \"aws/organizations\"\n }, {\n name: \"outposts\",\n description: \"AWS Outposts is a fully managed service that extends AWS infrastructure, APIs, and tools to customer premises. 
By providing local access to AWS managed infrastructure, AWS Outposts enables customers to build and run applications on premises using the same programming interfaces as in AWS Regions, while using local compute and storage resources for lower latency and local data processing needs\",\n loadSpec: \"aws/outposts\"\n }, {\n name: \"personalize\",\n description: \"Amazon Personalize is a machine learning service that makes it easy to add individualized recommendations to customers\",\n loadSpec: \"aws/personalize\"\n }, {\n name: \"personalize-events\",\n description: \"Amazon Personalize can consume real-time user event data, such as stream or click data, and use it for model training either alone or combined with historical data. For more information see Recording Events\",\n loadSpec: \"aws/personalize-events\"\n }, {\n name: \"personalize-runtime\",\n description: \"\",\n loadSpec: \"aws/personalize-runtime\"\n }, {\n name: \"pi\",\n description: \"Amazon RDS Performance Insights Amazon RDS Performance Insights enables you to monitor and explore different dimensions of database load based on data captured from a running DB instance. The guide provides detailed information about Performance Insights data types, parameters and errors. When Performance Insights is enabled, the Amazon RDS Performance Insights API provides visibility into the performance of your DB instance. Amazon CloudWatch provides the authoritative source for AWS service-vended monitoring metrics. Performance Insights offers a domain-specific view of DB load. DB load is measured as Average Active Sessions. Performance Insights provides the data to API consumers as a two-dimensional time-series dataset. The time dimension provides DB load data for each time point in the queried time range. Each time point decomposes overall load in relation to the requested dimensions, measured at that time point. Examples include SQL, Wait event, User, and Host. To learn more about Performance Insights and Amazon Aurora DB instances, go to the Amazon Aurora User Guide. To learn more about Performance Insights and Amazon RDS DB instances, go to the Amazon RDS User Guide\",\n loadSpec: \"aws/pi\"\n }, {\n name: \"pinpoint\",\n description: \"Doc Engage API - Amazon Pinpoint API\",\n loadSpec: \"aws/pinpoint\"\n }, {\n name: \"pinpoint-email\",\n description: \"Amazon Pinpoint Email Service Welcome to the Amazon Pinpoint Email API Reference. This guide provides information about the Amazon Pinpoint Email API (version 1.0), including supported operations, data types, parameters, and schemas. Amazon Pinpoint is an AWS service that you can use to engage with your customers across multiple messaging channels. You can use Amazon Pinpoint to send email, SMS text messages, voice messages, and push notifications. The Amazon Pinpoint Email API provides programmatic access to options that are unique to the email channel and supplement the options provided by the Amazon Pinpoint API. If you're new to Amazon Pinpoint, you might find it helpful to also review the Amazon Pinpoint Developer Guide. The Amazon Pinpoint Developer Guide provides tutorials, code samples, and procedures that demonstrate how to use Amazon Pinpoint features programmatically and how to integrate Amazon Pinpoint functionality into mobile apps and other types of applications. The guide also provides information about key topics such as Amazon Pinpoint integration with other AWS services and the limits that apply to using the service. 
The Amazon Pinpoint Email API is available in several AWS Regions and it provides an endpoint for each of these Regions. For a list of all the Regions and endpoints where the API is currently available, see AWS Service Endpoints in the Amazon Web Services General Reference. To learn more about AWS Regions, see Managing AWS Regions in the Amazon Web Services General Reference. In each Region, AWS maintains multiple Availability Zones. These Availability Zones are physically isolated from each other, but are united by private, low-latency, high-throughput, and highly redundant network connections. These Availability Zones enable us to provide very high levels of availability and redundancy, while also minimizing latency. To learn more about the number of Availability Zones that are available in each Region, see AWS Global Infrastructure\",\n loadSpec: \"aws/pinpoint-email\"\n }, {\n name: \"pinpoint-sms-voice\",\n description: \"Pinpoint SMS and Voice Messaging public facing APIs\",\n loadSpec: \"aws/pinpoint-sms-voice\"\n }, {\n name: \"polly\",\n description: \"Amazon Polly is a web service that makes it easy to synthesize speech from text. The Amazon Polly service provides API operations for synthesizing high-quality speech from plain text and Speech Synthesis Markup Language (SSML), along with managing pronunciation lexicons that enable you to get the best results for your application domain\",\n loadSpec: \"aws/polly\"\n }, {\n name: \"pricing\",\n description: \"AWS Price List Service API (AWS Price List Service) is a centralized and convenient way to programmatically query Amazon Web Services for services, products, and pricing information. The AWS Price List Service uses standardized product attributes such as Location, Storage Class, and Operating System, and provides prices at the SKU level. You can use the AWS Price List Service to build cost control and scenario planning tools, reconcile billing data, forecast future spend for budgeting purposes, and provide cost benefit analysis that compares your internal workloads with AWS. Use GetServices without a service code to retrieve the service codes for all AWS services, then GetServices with a service code to retrieve the attribute names for that service. After you have the service code and attribute names, you can use GetAttributeValues to see what values are available for an attribute. With the service code and an attribute name and value, you can use GetProducts to find specific products that you're interested in, such as an AmazonEC2 instance, with a Provisioned IOPS volumeType. Service Endpoint AWS Price List Service API provides the following two endpoints: https://api.pricing.us-east-1.amazonaws.com https://api.pricing.ap-south-1.amazonaws.com\",\n loadSpec: \"aws/pricing\"\n }, {\n name: \"qldb\",\n description: \"The control plane for Amazon QLDB\",\n loadSpec: \"aws/qldb\"\n }, {\n name: \"qldb-session\",\n description: \"The transactional data APIs for Amazon QLDB Instead of interacting directly with this API, we recommend using the QLDB driver or the QLDB shell to execute data transactions on a ledger. If you are working with an AWS SDK, use the QLDB driver. The driver provides a high-level abstraction layer above this QLDB Session data plane and manages SendCommand API calls for you. For information and a list of supported programming languages, see Getting started with the driver in the Amazon QLDB Developer Guide. If you are working with the AWS Command Line Interface (AWS CLI), use the QLDB shell. 
The shell is a command line interface that uses the QLDB driver to interact with a ledger. For information, see Accessing Amazon QLDB using the QLDB shell\",\n loadSpec: \"aws/qldb-session\"\n }, {\n name: \"quicksight\",\n description: \"Amazon QuickSight API Reference Amazon QuickSight is a fully managed, serverless business intelligence service for the AWS Cloud that makes it easy to extend data and insights to every user in your organization. This API reference contains documentation for a programming interface that you can use to manage Amazon QuickSight\",\n loadSpec: \"aws/quicksight\"\n }, {\n name: \"ram\",\n description: \"Use AWS Resource Access Manager to share AWS resources between AWS accounts. To share a resource, you create a resource share, associate the resource with the resource share, and specify the principals that can access the resources associated with the resource share. The following principals are supported: AWS accounts, organizational units (OU) from AWS Organizations, and organizations from AWS Organizations. For more information, see the AWS Resource Access Manager User Guide\",\n loadSpec: \"aws/ram\"\n }, {\n name: \"rds\",\n description: \"Amazon Relational Database Service Amazon Relational Database Service (Amazon RDS) is a web service that makes it easier to set up, operate, and scale a relational database in the cloud. It provides cost-efficient, resizeable capacity for an industry-standard relational database and manages common database administration tasks, freeing up developers to focus on what makes their applications and businesses unique. Amazon RDS gives you access to the capabilities of a MySQL, MariaDB, PostgreSQL, Microsoft SQL Server, Oracle, or Amazon Aurora database server. These capabilities mean that the code, applications, and tools you already use today with your existing databases work with Amazon RDS without modification. Amazon RDS automatically backs up your database and maintains the database software that powers your DB instance. Amazon RDS is flexible: you can scale your DB instance's compute resources and storage capacity to meet your application's demand. As with all Amazon Web Services, there are no up-front investments, and you pay only for the resources you use. This interface reference for Amazon RDS contains documentation for a programming or command line interface you can use to manage Amazon RDS. Amazon RDS is asynchronous, which means that some interfaces might require techniques such as polling or callback functions to determine when a command has been applied. In this reference, the parameter descriptions indicate whether a command is applied immediately, on the next instance reboot, or during the maintenance window. The reference structure is as follows, and we list following some related topics from the user guide. Amazon RDS API Reference For the alphabetical list of API actions, see API Actions. For the alphabetical list of data types, see Data Types. For a list of common query parameters, see Common Parameters. For descriptions of the error codes, see Common Errors. Amazon RDS User Guide For a summary of the Amazon RDS interfaces, see Available RDS Interfaces. For more information about how to use the Query API, see Using the Query API\",\n loadSpec: \"aws/rds\"\n }, {\n name: \"rds-data\",\n description: \"Amazon RDS Data Service Amazon RDS provides an HTTP endpoint to run SQL statements on an Amazon Aurora Serverless DB cluster. To run these statements, you work with the Data Service API. 
For more information about the Data Service API, see Using the Data API for Aurora Serverless in the Amazon Aurora User Guide\",\n loadSpec: \"aws/rds-data\"\n }, {\n name: \"redshift\",\n description: \"Amazon Redshift Overview This is an interface reference for Amazon Redshift. It contains documentation for one of the programming or command line interfaces you can use to manage Amazon Redshift clusters. Note that Amazon Redshift is asynchronous, which means that some interfaces may require techniques, such as polling or asynchronous callback handlers, to determine when a command has been applied. In this reference, the parameter descriptions indicate whether a change is applied immediately, on the next instance reboot, or during the next maintenance window. For a summary of the Amazon Redshift cluster management interfaces, go to Using the Amazon Redshift Management Interfaces. Amazon Redshift manages all the work of setting up, operating, and scaling a data warehouse: provisioning capacity, monitoring and backing up the cluster, and applying patches and upgrades to the Amazon Redshift engine. You can focus on using your data to acquire new insights for your business and customers. If you are a first-time user of Amazon Redshift, we recommend that you begin by reading the Amazon Redshift Getting Started Guide. If you are a database developer, the Amazon Redshift Database Developer Guide explains how to design, build, query, and maintain the databases that make up your data warehouse\",\n loadSpec: \"aws/redshift\"\n }, {\n name: \"redshift-data\",\n description: \"You can use the Amazon Redshift Data API to run queries on Amazon Redshift tables. You can run individual SQL statements, which are committed if the statement succeeds. For more information about the Amazon Redshift Data API, see Using the Amazon Redshift Data API in the Amazon Redshift Cluster Management Guide\",\n loadSpec: \"aws/redshift-data\"\n }, {\n name: \"rekognition\",\n description: \"This is the Amazon Rekognition API reference\",\n loadSpec: \"aws/rekognition\"\n }, {\n name: \"resource-groups\",\n description: \"AWS Resource Groups AWS Resource Groups lets you organize AWS resources such as Amazon EC2 instances, Amazon Relational Database Service databases, and Amazon S3 buckets into groups using criteria that you define as tags. A resource group is a collection of resources that match the resource types specified in a query, and share one or more tags or portions of tags. You can create a group of resources based on their roles in your cloud infrastructure, lifecycle stages, regions, application layers, or virtually any criteria. Resource Groups enable you to automate management tasks, such as those in AWS Systems Manager Automation documents, on tag-related resources in AWS Systems Manager. Groups of tagged resources also let you quickly view a custom console in AWS Systems Manager that shows AWS Config compliance and other monitoring data about member resources. To create a resource group, build a resource query, and specify tags that identify the criteria that members of the group have in common. Tags are key-value pairs. For more information about Resource Groups, see the AWS Resource Groups User Guide. AWS Resource Groups uses a REST-compliant API that you can use to perform the following types of operations. 
Create, Read, Update, and Delete (CRUD) operations on resource groups and resource query entities Applying, editing, and removing tags from resource groups Resolving resource group member ARNs so they can be returned as search results Getting data about resources that are members of a group Searching AWS resources based on a resource query\",\n loadSpec: \"aws/resource-groups\"\n }, {\n name: \"resourcegroupstaggingapi\",\n description: \"Resource Groups Tagging API\",\n loadSpec: \"aws/resourcegroupstaggingapi\"\n }, {\n name: \"robomaker\",\n description: \"This section provides documentation for the AWS RoboMaker API operations\",\n loadSpec: \"aws/robomaker\"\n }, {\n name: \"route53\",\n description: \"Amazon Route 53 is a highly available and scalable Domain Name System (DNS) web service\",\n loadSpec: \"aws/route53\"\n }, {\n name: \"route53domains\",\n description: \"Amazon Route 53 API actions let you register domain names and perform related operations\",\n loadSpec: \"aws/route53domains\"\n }, {\n name: \"route53resolver\",\n description: \"When you create a VPC using Amazon VPC, you automatically get DNS resolution within the VPC from Route 53 Resolver. By default, Resolver answers DNS queries for VPC domain names such as domain names for EC2 instances or ELB load balancers. Resolver performs recursive lookups against public name servers for all other domain names. You can also configure DNS resolution between your VPC and your network over a Direct Connect or VPN connection: Forward DNS queries from resolvers on your network to Route 53 Resolver DNS resolvers on your network can forward DNS queries to Resolver in a specified VPC. This allows your DNS resolvers to easily resolve domain names for AWS resources such as EC2 instances or records in a Route 53 private hosted zone. For more information, see How DNS Resolvers on Your Network Forward DNS Queries to Route 53 Resolver in the Amazon Route 53 Developer Guide. Conditionally forward queries from a VPC to resolvers on your network You can configure Resolver to forward queries that it receives from EC2 instances in your VPCs to DNS resolvers on your network. To forward selected queries, you create Resolver rules that specify the domain names for the DNS queries that you want to forward (such as example.com), and the IP addresses of the DNS resolvers on your network that you want to forward the queries to. If a query matches multiple rules (example.com, acme.example.com), Resolver chooses the rule with the most specific match (acme.example.com) and forwards the query to the IP addresses that you specified in that rule. For more information, see How Route 53 Resolver Forwards DNS Queries from Your VPCs to Your Network in the Amazon Route 53 Developer Guide. Like Amazon VPC, Resolver is regional. In each region where you have VPCs, you can choose whether to forward queries from your VPCs to your network (outbound queries), from your network to your VPCs (inbound queries), or both\",\n loadSpec: \"aws/route53resolver\"\n }, {\n name: \"s3control\",\n description: \"AWS S3 Control provides access to Amazon S3 control plane actions\",\n loadSpec: \"aws/s3control\"\n }, {\n name: \"s3outposts\",\n description: \"Amazon S3 on Outposts provides access to S3 on Outposts operations\",\n loadSpec: \"aws/s3outposts\"\n }, {\n name: \"sagemaker\",\n description: \"Provides APIs for creating and managing Amazon SageMaker resources. 
Other Resources: Amazon SageMaker Developer Guide Amazon Augmented AI Runtime API Reference\",\n loadSpec: \"aws/sagemaker\"\n }, {\n name: \"sagemaker-a2i-runtime\",\n description: \"Amazon Augmented AI is in preview release and is subject to change. We do not recommend using this product in production environments. Amazon Augmented AI (Amazon A2I) adds the benefit of human judgment to any machine learning application. When an AI application can't evaluate data with a high degree of confidence, human reviewers can take over. This human review is called a human review workflow. To create and start a human review workflow, you need three resources: a worker task template, a flow definition, and a human loop. For information about these resources and prerequisites for using Amazon A2I, see Get Started with Amazon Augmented AI in the Amazon SageMaker Developer Guide. This API reference includes information about API actions and data types that you can use to interact with Amazon A2I programmatically. Use this guide to: Start a human loop with the StartHumanLoop operation when using Amazon A2I with a custom task type. To learn more about the difference between custom and built-in task types, see Use Task Types. To learn how to start a human loop using this API, see Create and Start a Human Loop for a Custom Task Type in the Amazon SageMaker Developer Guide. Manage your human loops. You can list all human loops that you have created, describe individual human loops, and stop and delete human loops. To learn more, see Monitor and Manage Your Human Loop in the Amazon SageMaker Developer Guide. Amazon A2I integrates APIs from various AWS services to create and start human review workflows for those services. To learn how Amazon A2I uses these APIs, see Use APIs in Amazon A2I in the Amazon SageMaker Developer Guide\",\n loadSpec: \"aws/sagemaker-a2i-runtime\"\n }, {\n name: \"sagemaker-edge\",\n description: \"SageMaker Edge Manager dataplane service for communicating with active agents\",\n loadSpec: \"aws/sagemaker-edge\"\n }, {\n name: \"sagemaker-featurestore-runtime\",\n description: \"Contains all data plane API operations and data types for the Amazon SageMaker Feature Store. Use this API to put, delete, and retrieve (get) features from a feature store. Use the following operations to configure your OnlineStore and OfflineStore features, and to create and manage feature groups: CreateFeatureGroup DeleteFeatureGroup DescribeFeatureGroup ListFeatureGroups\",\n loadSpec: \"aws/sagemaker-featurestore-runtime\"\n }, {\n name: \"sagemaker-runtime\",\n description: \"The Amazon SageMaker runtime API\",\n loadSpec: \"aws/sagemaker-runtime\"\n }, {\n name: \"savingsplans\",\n description: \"Savings Plans are a pricing model that offer significant savings on AWS usage (for example, on Amazon EC2 instances). You commit to a consistent amount of usage, in USD per hour, for a term of 1 or 3 years, and receive a lower price for that usage. For more information, see the AWS Savings Plans User Guide\",\n loadSpec: \"aws/savingsplans\"\n }, {\n name: \"schemas\",\n description: \"Amazon EventBridge Schema Registry\",\n loadSpec: \"aws/schemas\"\n }, {\n name: \"sdb\",\n description: \"Amazon SimpleDB is a web service providing the core database functions of data indexing and querying in the cloud. By offloading the time and effort associated with building and operating a web-scale database, SimpleDB provides developers the freedom to focus on application development. 
A traditional, clustered relational database requires a sizable upfront capital outlay, is complex to design, and often requires extensive and repetitive database administration. Amazon SimpleDB is dramatically simpler, requiring no schema, automatically indexing your data and providing a simple API for storage and access. This approach eliminates the administrative burden of data modeling, index maintenance, and performance tuning. Developers gain access to this functionality within Amazon's proven computing environment, are able to scale instantly, and pay only for what they use. Visit http://aws.amazon.com/simpledb/ for more information\",\n loadSpec: \"aws/sdb\"\n }, {\n name: \"secretsmanager\",\n description: \"AWS Secrets Manager API Reference AWS Secrets Manager provides a service to enable you to store, manage, and retrieve secrets. This guide provides descriptions of the Secrets Manager API. For more information about using this service, see the AWS Secrets Manager User Guide. API Version This version of the Secrets Manager API Reference documents the Secrets Manager API version 2017-10-17. As an alternative to using the API, you can use one of the AWS SDKs, which consist of libraries and sample code for various programming languages and platforms such as Java, Ruby, .NET, iOS, and Android. The SDKs provide a convenient way to create programmatic access to AWS Secrets Manager. For example, the SDKs take care of cryptographically signing requests, managing errors, and retrying requests automatically. For more information about the AWS SDKs, including downloading and installing them, see Tools for Amazon Web Services. We recommend you use the AWS SDKs to make programmatic API calls to Secrets Manager. However, you also can use the Secrets Manager HTTP Query API to make direct calls to the Secrets Manager web service. To learn more about the Secrets Manager HTTP Query API, see Making Query Requests in the AWS Secrets Manager User Guide. Secrets Manager API supports GET and POST requests for all actions, and doesn't require you to use GET for some actions and POST for others. However, GET requests are subject to the size limitation of a URL. Therefore, for operations that require larger sizes, use a POST request. Support and Feedback for AWS Secrets Manager We welcome your feedback. Send your comments to awssecretsmanager-feedback@amazon.com, or post your feedback and questions in the AWS Secrets Manager Discussion Forum. For more information about the AWS Discussion Forums, see Forums Help. How examples are presented The JSON that AWS Secrets Manager expects as your request parameters and the service returns as a response to HTTP query requests contain single, long strings without line breaks or white space formatting. The JSON shown in the examples displays the code formatted with both line breaks and white space to improve readability. When example input parameters can also cause long strings extending beyond the screen, you can insert line breaks to enhance readability. You should always submit the input as a single JSON text string. Logging API Requests AWS Secrets Manager supports AWS CloudTrail, a service that records AWS API calls for your AWS account and delivers log files to an Amazon S3 bucket. By using information that's collected by AWS CloudTrail, you can determine the requests successfully made to Secrets Manager, who made the request, when it was made, and so on. 
For more about AWS Secrets Manager and support for AWS CloudTrail, see Logging AWS Secrets Manager Events with AWS CloudTrail in the AWS Secrets Manager User Guide. To learn more about CloudTrail, including enabling it and find your log files, see the AWS CloudTrail User Guide\",\n loadSpec: \"aws/secretsmanager\"\n }, {\n name: \"securityhub\",\n description: \"Security Hub provides you with a comprehensive view of the security state of your AWS environment and resources. It also provides you with the readiness status of your environment based on controls from supported security standards. Security Hub collects security data from AWS accounts, services, and integrated third-party products and helps you analyze security trends in your environment to identify the highest priority security issues. For more information about Security Hub, see the AWS Security Hub User Guide . When you use operations in the Security Hub API, the requests are executed only in the AWS Region that is currently active or in the specific AWS Region that you specify in your request. Any configuration or settings change that results from the operation is applied only to that Region. To make the same change in other Regions, execute the same command for each Region to apply the change to. For example, if your Region is set to us-west-2, when you use CreateMembers to add a member account to Security Hub, the association of the member account with the administrator account is created only in the us-west-2 Region. Security Hub must be enabled for the member account in the same Region that the invitation was sent from. The following throttling limits apply to using Security Hub API operations. BatchEnableStandards - RateLimit of 1 request per second, BurstLimit of 1 request per second. GetFindings - RateLimit of 3 requests per second. BurstLimit of 6 requests per second. UpdateFindings - RateLimit of 1 request per second. BurstLimit of 5 requests per second. UpdateStandardsControl - RateLimit of 1 request per second, BurstLimit of 5 requests per second. All other operations - RateLimit of 10 requests per second. BurstLimit of 30 requests per second\",\n loadSpec: \"aws/securityhub\"\n }, {\n name: \"serverlessrepo\",\n description: \"The AWS Serverless Application Repository makes it easy for developers and enterprises to quickly find\\n and deploy serverless applications in the AWS Cloud. For more information about serverless applications,\\n see Serverless Computing and Applications on the AWS website.The AWS Serverless Application Repository is deeply integrated with the AWS Lambda console, so that developers of \\n all levels can get started with serverless computing without needing to learn anything new. You can use category \\n keywords to browse for applications such as web and mobile backends, data processing applications, or chatbots. \\n You can also search for applications by name, publisher, or event source. To use an application, you simply choose it, \\n configure any required fields, and deploy it with a few clicks. You can also easily publish applications, sharing them publicly with the community at large, or privately\\n within your team or across your organization. To publish a serverless application (or app), you can use the\\n AWS Management Console, AWS Command Line Interface (AWS CLI), or AWS SDKs to upload the code. 
Along with the\\n code, you upload a simple manifest file, also known as the AWS Serverless Application Model (AWS SAM) template.\\n For more information about AWS SAM, see AWS Serverless Application Model (AWS SAM) on the AWS Labs\\n GitHub repository.The AWS Serverless Application Repository Developer Guide contains more information about the two developer\\n experiences available:\\n \\n Consuming Applications \\u2013 Browse for applications and view information about them, including\\n source code and readme files. Also install, configure, and deploy applications of your choosing. \\n Publishing Applications \\u2013 Configure and upload applications to make them available to other\\n developers, and publish new versions of applications\",\n loadSpec: \"aws/serverlessrepo\"\n }, {\n name: \"service-quotas\",\n description: \"With Service Quotas, you can view and manage your quotas easily as your AWS workloads grow. Quotas, also referred to as limits, are the maximum number of resources that you can create in your AWS account. For more information, see the Service Quotas User Guide\",\n loadSpec: \"aws/service-quotas\"\n }, {\n name: \"servicecatalog\",\n description: \"AWS Service Catalog AWS Service Catalog enables organizations to create and manage catalogs of IT services that are approved for AWS. To get the most out of this documentation, you should be familiar with the terminology discussed in AWS Service Catalog Concepts\",\n loadSpec: \"aws/servicecatalog\"\n }, {\n name: \"servicecatalog-appregistry\",\n description: \"AWS Service Catalog AppRegistry enables organizations to understand the application context of their AWS resources. AppRegistry provides a repository of your applications, their resources, and the application metadata that you use within your enterprise\",\n loadSpec: \"aws/servicecatalog-appregistry\"\n }, {\n name: \"servicediscovery\",\n description: \"AWS Cloud Map lets you configure public DNS, private DNS, or HTTP namespaces that your microservice applications run in. When an instance of the service becomes available, you can call the AWS Cloud Map API to register the instance with AWS Cloud Map. For public or private DNS namespaces, AWS Cloud Map automatically creates DNS records and an optional health check. Clients that submit public or private DNS queries, or HTTP requests, for the service receive an answer that contains up to eight healthy records\",\n loadSpec: \"aws/servicediscovery\"\n }, {\n name: \"ses\",\n description: \"Amazon Simple Email Service This document contains reference information for the Amazon Simple Email Service (Amazon SES) API, version 2010-12-01. This document is best used in conjunction with the Amazon SES Developer Guide. For a list of Amazon SES endpoints to use in service requests, see Regions and Amazon SES in the Amazon SES Developer Guide\",\n loadSpec: \"aws/ses\"\n }, {\n name: \"sesv2\",\n description: \"Amazon SES API v2 Welcome to the Amazon SES API v2 Reference. This guide provides information about the Amazon SES API v2, including supported operations, data types, parameters, and schemas. Amazon SES is an AWS service that you can use to send email messages to your customers. If you're new to Amazon SES API v2, you might find it helpful to also review the Amazon Simple Email Service Developer Guide. The Amazon SES Developer Guide provides information and code samples that demonstrate how to use Amazon SES API v2 features programmatically. 
The Amazon SES API v2 is available in several AWS Regions and it provides an endpoint for each of these Regions. For a list of all the Regions and endpoints where the API is currently available, see AWS Service Endpoints in the Amazon Web Services General Reference. To learn more about AWS Regions, see Managing AWS Regions in the Amazon Web Services General Reference. In each Region, AWS maintains multiple Availability Zones. These Availability Zones are physically isolated from each other, but are united by private, low-latency, high-throughput, and highly redundant network connections. These Availability Zones enable us to provide very high levels of availability and redundancy, while also minimizing latency. To learn more about the number of Availability Zones that are available in each Region, see AWS Global Infrastructure\",\n loadSpec: \"aws/sesv2\"\n }, {\n name: \"shield\",\n description: \"AWS Shield Advanced This is the AWS Shield Advanced API Reference. This guide is for developers who need detailed information about the AWS Shield Advanced API actions, data types, and errors. For detailed information about AWS WAF and AWS Shield Advanced features and an overview of how to use the AWS WAF and AWS Shield Advanced APIs, see the AWS WAF and AWS Shield Developer Guide\",\n loadSpec: \"aws/shield\"\n }, {\n name: \"signer\",\n description: \"AWS Signer is a fully managed code signing service to help you ensure the trust and integrity of your code. AWS Signer supports the following applications: With code signing for AWS Lambda, you can sign AWS Lambda deployment packages. Integrated support is provided for Amazon S3, Amazon CloudWatch, and AWS CloudTrail. In order to sign code, you create a signing profile and then use Signer to sign Lambda zip files in S3. With code signing for IoT, you can sign code for any IoT device that is supported by AWS. IoT code signing is available for Amazon FreeRTOS and AWS IoT Device Management, and is integrated with AWS Certificate Manager (ACM). In order to sign code, you import a third-party code signing certificate using ACM, and use that to sign updates in Amazon FreeRTOS and AWS IoT Device Management. For more information about AWS Signer, see the AWS Signer Developer Guide\",\n loadSpec: \"aws/signer\"\n }, {\n name: \"sms\",\n description: \"AWS Server Migration Service AWS Server Migration Service (AWS SMS) makes it easier and faster for you to migrate your on-premises workloads to AWS. To learn more about AWS SMS, see the following resources: AWS Server Migration Service product page AWS Server Migration Service User Guide\",\n loadSpec: \"aws/sms\"\n }, {\n name: \"sms-voice\",\n description: \"Pinpoint SMS and Voice Messaging public facing APIs\",\n loadSpec: \"aws/sms-voice\"\n }, {\n name: \"snowball\",\n description: \"AWS Snow Family is a petabyte-scale data transport solution that uses secure devices to transfer large amounts of data between your on-premises data centers and Amazon Simple Storage Service (Amazon S3). The Snow commands described here provide access to the same functionality that is available in the AWS Snow Family Management Console, which enables you to create and manage jobs for a Snow device. To transfer data locally with a Snow device, you'll need to use the Snowball Edge client or the Amazon S3 API Interface for Snowball or AWS OpsHub for Snow Family. 
For more information, see the User Guide\",\n loadSpec: \"aws/snowball\"\n }, {\n name: \"sns\",\n description: \"Amazon Simple Notification Service Amazon Simple Notification Service (Amazon SNS) is a web service that enables you to build distributed web-enabled applications. Applications can use Amazon SNS to easily push real-time notification messages to interested subscribers over multiple delivery protocols. For more information about this product see the Amazon SNS product page. For detailed information about Amazon SNS features and their associated API calls, see the Amazon SNS Developer Guide. For information on the permissions you need to use this API, see Identity and access management in Amazon SNS in the Amazon SNS Developer Guide. We also provide SDKs that enable you to access Amazon SNS from your preferred programming language. The SDKs contain functionality that automatically takes care of tasks such as: cryptographically signing your service requests, retrying requests, and handling error responses. For a list of available SDKs, go to Tools for Amazon Web Services\",\n loadSpec: \"aws/sns\"\n }, {\n name: \"sqs\",\n description: \"Welcome to the Amazon Simple Queue Service API Reference. Amazon Simple Queue Service (Amazon SQS) is a reliable, highly-scalable hosted queue for storing messages as they travel between applications or microservices. Amazon SQS moves data between distributed application components and helps you decouple these components. For information on the permissions you need to use this API, see Identity and access management in the Amazon Simple Queue Service Developer Guide. You can use AWS SDKs to access Amazon SQS using your favorite programming language. The SDKs perform tasks such as the following automatically: Cryptographically sign your service requests Retry requests Handle error responses Additional information Amazon SQS Product Page Amazon Simple Queue Service Developer Guide Making API Requests Amazon SQS Message Attributes Amazon SQS Dead-Letter Queues Amazon SQS in the AWS CLI Command Reference Amazon Web Services General Reference Regions and Endpoints\",\n loadSpec: \"aws/sqs\"\n }, {\n name: \"ssm\",\n description: \"AWS Systems Manager AWS Systems Manager is a collection of capabilities that helps you automate management tasks such as collecting system inventory, applying operating system (OS) patches, automating the creation of Amazon Machine Images (AMIs), and configuring operating systems (OSs) and applications at scale. Systems Manager lets you remotely and securely manage the configuration of your managed instances. A managed instance is any Amazon Elastic Compute Cloud instance (EC2 instance), or any on-premises server or virtual machine (VM) in your hybrid environment that has been configured for Systems Manager. This reference is intended to be used with the AWS Systems Manager User Guide. To get started, verify prerequisites and configure managed instances. For more information, see Setting up AWS Systems Manager in the AWS Systems Manager User Guide. For information about other API actions you can perform on EC2 instances, see the Amazon EC2 API Reference. For information about how to use a Query API, see Making API requests\",\n loadSpec: \"aws/ssm\"\n }, {\n name: \"sso\",\n description: \"AWS Single Sign-On Portal is a web service that makes it easy for you to assign user access to AWS SSO resources such as the user portal. 
Users can get AWS account applications and roles assigned to them and get federated into the application. For general information about AWS SSO, see What is AWS Single Sign-On? in the AWS SSO User Guide. This API reference guide describes the AWS SSO Portal operations that you can call programmatically and includes detailed information on data types and errors. AWS provides SDKs that consist of libraries and sample code for various programming languages and platforms, such as Java, Ruby, .Net, iOS, or Android. The SDKs provide a convenient way to create programmatic access to AWS SSO and other AWS services. For more information about the AWS SDKs, including how to download and install them, see Tools for Amazon Web Services\",\n loadSpec: \"aws/sso\"\n }, {\n name: \"sso-admin\",\n description: \"\",\n loadSpec: \"aws/sso-admin\"\n }, {\n name: \"sso-oidc\",\n description: \"AWS Single Sign-On (SSO) OpenID Connect (OIDC) is a web service that enables a client (such as AWS CLI or a native application) to register with AWS SSO. The service also enables the client to fetch the user\\u2019s access token upon successful authentication and authorization with AWS SSO. This service conforms with the OAuth 2.0 based implementation of the device authorization grant standard (https://tools.ietf.org/html/rfc8628). For general information about AWS SSO, see What is AWS Single Sign-On? in the AWS SSO User Guide. This API reference guide describes the AWS SSO OIDC operations that you can call programmatically and includes detailed information on data types and errors. AWS provides SDKs that consist of libraries and sample code for various programming languages and platforms such as Java, Ruby, .Net, iOS, and Android. The SDKs provide a convenient way to create programmatic access to AWS SSO and other AWS services. For more information about the AWS SDKs, including how to download and install them, see Tools for Amazon Web Services\",\n loadSpec: \"aws/sso-oidc\"\n }, {\n name: \"stepfunctions\",\n description: \"AWS Step Functions AWS Step Functions is a service that lets you coordinate the components of distributed applications and microservices using visual workflows. You can use Step Functions to build applications from individual components, each of which performs a discrete function, or task, allowing you to scale and change applications quickly. Step Functions provides a console that helps visualize the components of your application as a series of steps. Step Functions automatically triggers and tracks each step, and retries steps when there are errors, so your application executes predictably and in the right order every time. Step Functions logs the state of each step, so you can quickly diagnose and debug any issues. Step Functions manages operations and underlying infrastructure to ensure your application is available at any scale. You can run tasks on AWS, your own servers, or any system that has access to AWS. You can access and use Step Functions using the console, the AWS SDKs, or an HTTP API. For more information about Step Functions, see the AWS Step Functions Developer Guide\",\n loadSpec: \"aws/stepfunctions\"\n }, {\n name: \"storagegateway\",\n description: \"AWS Storage Gateway Service AWS Storage Gateway is the service that connects an on-premises software appliance with cloud-based storage to provide seamless and secure integration between an organization's on-premises IT environment and the AWS storage infrastructure. 
The service enables you to securely upload data to the AWS Cloud for cost effective backup and rapid disaster recovery. Use the following links to get started using the AWS Storage Gateway Service API Reference: AWS Storage Gateway required request headers: Describes the required headers that you must send with every POST request to AWS Storage Gateway. Signing requests: AWS Storage Gateway requires that you authenticate every request you send; this topic describes how sign such a request. Error responses: Provides reference information about AWS Storage Gateway errors. Operations in AWS Storage Gateway: Contains detailed descriptions of all AWS Storage Gateway operations, their request parameters, response elements, possible errors, and examples of requests and responses. AWS Storage Gateway endpoints and quotas: Provides a list of each AWS Region and the endpoints available for use with AWS Storage Gateway. AWS Storage Gateway resource IDs are in uppercase. When you use these resource IDs with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change your resource ID to lowercase to use it with the EC2 API. For example, in Storage Gateway the ID for a volume might be vol-AA22BB012345DAF670. When you use this ID with the EC2 API, you must change it to vol-aa22bb012345daf670. Otherwise, the EC2 API might not behave as expected. IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway volumes are changing to a longer format. Starting in December 2016, all new volumes and snapshots will be created with a 17-character string. Starting in April 2016, you will be able to use these longer IDs so you can test your systems with the new format. For more information, see Longer EC2 and EBS resource IDs. For example, a volume Amazon Resource Name (ARN) with the longer volume ID format looks like the following: arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG. A snapshot ID with the longer ID format looks like the following: snap-78e226633445566ee. For more information, see Announcement: Heads-up \\u2013 Longer AWS Storage Gateway volume and snapshot IDs coming in 2016\",\n loadSpec: \"aws/storagegateway\"\n }, {\n name: \"sts\",\n description: \"AWS Security Token Service AWS Security Token Service (STS) enables you to request temporary, limited-privilege credentials for AWS Identity and Access Management (IAM) users or for users that you authenticate (federated users). This guide provides descriptions of the STS API. For more information about using this service, see Temporary Security Credentials\",\n loadSpec: \"aws/sts\"\n }, {\n name: \"support\",\n description: \"AWS Support The AWS Support API reference is intended for programmers who need detailed information about the AWS Support operations and data types. This service enables you to manage your AWS Support cases programmatically. It uses HTTP methods that return results in JSON format. You must have a Business or Enterprise support plan to use the AWS Support API. If you call the AWS Support API from an account that does not have a Business or Enterprise support plan, the SubscriptionRequiredException error message appears. For information about changing your support plan, see AWS Support. The AWS Support service also exposes a set of AWS Trusted Advisor features. You can retrieve a list of checks and their descriptions, get check results, specify checks to refresh, and get the refresh status of checks. 
The following list describes the AWS Support case management operations: Service names, issue categories, and available severity levels. The DescribeServices and DescribeSeverityLevels operations return AWS service names, service codes, service categories, and problem severity levels. You use these values when you call the CreateCase operation. Case creation, case details, and case resolution. The CreateCase, DescribeCases, DescribeAttachment, and ResolveCase operations create AWS Support cases, retrieve information about cases, and resolve cases. Case communication. The DescribeCommunications, AddCommunicationToCase, and AddAttachmentsToSet operations retrieve and add communications and attachments to AWS Support cases. The following list describes the operations available from the AWS Support service for Trusted Advisor: DescribeTrustedAdvisorChecks returns the list of checks that run against your AWS resources. Using the checkId for a specific check returned by DescribeTrustedAdvisorChecks, you can call DescribeTrustedAdvisorCheckResult to obtain the results for the check that you specified. DescribeTrustedAdvisorCheckSummaries returns summarized results for one or more Trusted Advisor checks. RefreshTrustedAdvisorCheck requests that Trusted Advisor rerun a specified check. DescribeTrustedAdvisorCheckRefreshStatuses reports the refresh status of one or more checks. For authentication of requests, AWS Support uses Signature Version 4 Signing Process. See About the AWS Support API in the AWS Support User Guide for information about how to use this service to create and manage your support cases, and how to call Trusted Advisor for results of checks on your resources\",\n loadSpec: \"aws/support\"\n }, {\n name: \"swf\",\n description: \"Amazon Simple Workflow Service The Amazon Simple Workflow Service (Amazon SWF) makes it easy to build applications that use Amazon's cloud to coordinate work across distributed components. In Amazon SWF, a task represents a logical unit of work that is performed by a component of your workflow. Coordinating tasks in a workflow involves managing intertask dependencies, scheduling, and concurrency in accordance with the logical flow of the application. Amazon SWF gives you full control over implementing tasks and coordinating them without worrying about underlying complexities such as tracking their progress and maintaining their state. This documentation serves as reference only. For a broader overview of the Amazon SWF programming model, see the Amazon SWF Developer Guide\",\n loadSpec: \"aws/swf\"\n }, {\n name: \"synthetics\",\n description: \"Amazon CloudWatch Synthetics You can use Amazon CloudWatch Synthetics to continually monitor your services. You can create and manage canaries, which are modular, lightweight scripts that monitor your endpoints and APIs from the outside-in. You can set up your canaries to run 24 hours a day, once per minute. The canaries help you check the availability and latency of your web services and troubleshoot anomalies by investigating load time data, screenshots of the UI, logs, and metrics. The canaries seamlessly integrate with CloudWatch ServiceLens to help you trace the causes of impacted nodes in your applications. For more information, see Using ServiceLens to Monitor the Health of Your Applications in the Amazon CloudWatch User Guide. Before you create and manage canaries, be aware of the security considerations. 
For more information, see Security Considerations for Synthetics Canaries\",\n loadSpec: \"aws/synthetics\"\n }, {\n name: \"textract\",\n description: \"Amazon Textract detects and analyzes text in documents and converts it into machine-readable text. This is the API reference documentation for Amazon Textract\",\n loadSpec: \"aws/textract\"\n }, {\n name: \"timestream-query\",\n description: \"\",\n loadSpec: \"aws/timestream-query\"\n }, {\n name: \"timestream-write\",\n description: \"Amazon Timestream is a fast, scalable, fully managed time series database service that makes it easy to store and analyze trillions of time series data points per day. With Timestream, you can easily store and analyze IoT sensor data to derive insights from your IoT applications. You can analyze industrial telemetry to streamline equipment management and maintenance. You can also store and analyze log data and metrics to improve the performance and availability of your applications. Timestream is built from the ground up to effectively ingest, process, and store time series data. It organizes data to optimize query processing. It automatically scales based on the volume of data ingested and on the query volume to ensure you receive optimal performance while inserting and querying data. As your data grows over time, Timestream\\u2019s adaptive query processing engine spans across storage tiers to provide fast analysis while reducing costs\",\n loadSpec: \"aws/timestream-write\"\n }, {\n name: \"transcribe\",\n description: \"Operations and objects for transcribing speech to text\",\n loadSpec: \"aws/transcribe\"\n }, {\n name: \"transfer\",\n description: \"AWS Transfer Family is a fully managed service that enables the transfer of files over the File Transfer Protocol (FTP), File Transfer Protocol over SSL (FTPS), or Secure Shell (SSH) File Transfer Protocol (SFTP) directly into and out of Amazon Simple Storage Service (Amazon S3). AWS helps you seamlessly migrate your file transfer workflows to AWS Transfer Family by integrating with existing authentication systems, and providing DNS routing with Amazon Route 53 so nothing changes for your customers and partners, or their applications. With your data in Amazon S3, you can use it with AWS services for processing, analytics, machine learning, and archiving. Getting started with AWS Transfer Family is easy since there is no infrastructure to buy and set up\",\n loadSpec: \"aws/transfer\"\n }, {\n name: \"translate\",\n description: \"Provides translation between one source language and another of the same set of languages\",\n loadSpec: \"aws/translate\"\n }, {\n name: \"waf\",\n description: \"This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide. For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use. This is the AWS WAF Classic API Reference for using AWS WAF Classic with Amazon CloudFront. The AWS WAF Classic actions and data types listed in the reference are available for protecting Amazon CloudFront distributions. You can use these actions and data types via the endpoint waf.amazonaws.com. This guide is for developers who need detailed information about the AWS WAF Classic API actions, data types, and errors. 
For detailed information about AWS WAF Classic features and an overview of how to use the AWS WAF Classic API, see the AWS WAF Classic in the developer guide\",\n loadSpec: \"aws/waf\"\n }, {\n name: \"waf-regional\",\n description: \"This is AWS WAF Classic Regional documentation. For more information, see AWS WAF Classic in the developer guide. For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use. This is the AWS WAF Regional Classic API Reference for using AWS WAF Classic with the AWS resources, Elastic Load Balancing (ELB) Application Load Balancers and API Gateway APIs. The AWS WAF Classic actions and data types listed in the reference are available for protecting Elastic Load Balancing (ELB) Application Load Balancers and API Gateway APIs. You can use these actions and data types by means of the endpoints listed in AWS Regions and Endpoints. This guide is for developers who need detailed information about the AWS WAF Classic API actions, data types, and errors. For detailed information about AWS WAF Classic features and an overview of how to use the AWS WAF Classic API, see the AWS WAF Classic in the developer guide\",\n loadSpec: \"aws/waf-regional\"\n }, {\n name: \"wafv2\",\n description: \"This is the latest version of the AWS WAF API, released in November, 2019. The names of the entities that you use to access this API, like endpoints and namespaces, all have the versioning information added, like \\\"V2\\\" or \\\"v2\\\", to distinguish from the prior version. We recommend migrating your resources to this version, because it has a number of significant improvements. If you used AWS WAF prior to this release, you can't use this AWS WAFV2 API to access any AWS WAF resources that you created before. You can access your old rules, web ACLs, and other AWS WAF resources only through the AWS WAF Classic APIs. The AWS WAF Classic APIs have retained the prior names, endpoints, and namespaces. For information, including how to migrate your AWS WAF resources to this version, see the AWS WAF Developer Guide. AWS WAF is a web application firewall that lets you monitor the HTTP and HTTPS requests that are forwarded to Amazon CloudFront, an Amazon API Gateway REST API, an Application Load Balancer, or an AWS AppSync GraphQL API. AWS WAF also lets you control access to your content. Based on conditions that you specify, such as the IP addresses that requests originate from or the values of query strings, the API Gateway REST API, CloudFront distribution, the Application Load Balancer, or the AWS AppSync GraphQL API responds to requests either with the requested content or with an HTTP 403 status code (Forbidden). You also can configure CloudFront to return a custom error page when a request is blocked. This API guide is for developers who need detailed information about AWS WAF API actions, data types, and errors. For detailed information about AWS WAF features and an overview of how to use AWS WAF, see the AWS WAF Developer Guide. You can make calls using the endpoints listed in AWS Service Endpoints for AWS WAF. For regional applications, you can use any of the endpoints in the list. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API. For AWS CloudFront applications, you must use the API endpoint listed for US East (N. Virginia): us-east-1. 
Alternatively, you can use one of the AWS SDKs to access an API that's tailored to the programming language or platform that you're using. For more information, see AWS SDKs. We currently provide two versions of the AWS WAF API: this API and the prior versions, the classic AWS WAF APIs. This new API provides the same functionality as the older versions, with the following major improvements: You use one API for both global and regional applications. Where you need to distinguish the scope, you specify a Scope parameter and set it to CLOUDFRONT or REGIONAL. You can define a Web ACL or rule group with a single call, and update it with a single call. You define all rule specifications in JSON format, and pass them to your rule group or Web ACL calls. The limits AWS WAF places on the use of rules more closely reflects the cost of running each type of rule. Rule groups include capacity settings, so you know the maximum cost of a rule group when you use it\",\n loadSpec: \"aws/wafv2\"\n }, {\n name: \"wellarchitected\",\n description: \"AWS Well-Architected Tool This is the AWS Well-Architected Tool API Reference. The AWS Well-Architected Tool API provides programmatic access to the AWS Well-Architected Tool in the AWS Management Console. For information about the AWS Well-Architected Tool, see the AWS Well-Architected Tool User Guide\",\n loadSpec: \"aws/wellarchitected\"\n }, {\n name: \"workdocs\",\n description: \"The WorkDocs API is designed for the following use cases: File Migration: File migration applications are supported for users who want to migrate their files from an on-premises or off-premises file system or service. Users can insert files into a user directory structure, as well as allow for basic metadata changes, such as modifications to the permissions of files. Security: Support security applications are supported for users who have additional security needs, such as antivirus or data loss prevention. The API actions, along with AWS CloudTrail, allow these applications to detect when changes occur in Amazon WorkDocs. Then, the application can take the necessary actions and replace the target file. If the target file violates the policy, the application can also choose to email the user. eDiscovery/Analytics: General administrative applications are supported, such as eDiscovery and analytics. These applications can choose to mimic or record the actions in an Amazon WorkDocs site, along with AWS CloudTrail, to replicate data for eDiscovery, backup, or analytical applications. All Amazon WorkDocs API actions are Amazon authenticated and certificate-signed. They not only require the use of the AWS SDK, but also allow for the exclusive use of IAM users and roles to help facilitate access, trust, and permission policies. By creating a role and allowing an IAM user to access the Amazon WorkDocs site, the IAM user gains full administrative visibility into the entire Amazon WorkDocs site (or as set in the IAM policy). This includes, but is not limited to, the ability to modify file permissions and upload any file to any user. This allows developers to perform the three use cases above, as well as give users the ability to grant access on a selective basis using the IAM model\",\n loadSpec: \"aws/workdocs\"\n }, {\n name: \"worklink\",\n description: \"Amazon WorkLink is a cloud-based service that provides secure access to internal websites and web apps from iOS and Android phones. 
In a single step, your users, such as employees, can access internal websites as efficiently as they access any other public website. They enter a URL in their web browser, or choose a link to an internal website in an email. Amazon WorkLink authenticates the user's access and securely renders authorized internal web content in a secure rendering service in the AWS cloud. Amazon WorkLink doesn't download or store any internal web content on mobile devices\",\n loadSpec: \"aws/worklink\"\n }, {\n name: \"workmail\",\n description: \"Amazon WorkMail is a secure, managed business email and calendaring service with support for existing desktop and mobile email clients. You can access your email, contacts, and calendars using Microsoft Outlook, your browser, or other native iOS and Android email applications. You can integrate WorkMail with your existing corporate directory and control both the keys that encrypt your data and the location in which your data is stored. The WorkMail API is designed for the following scenarios: Listing and describing organizations Managing users Managing groups Managing resources All WorkMail API operations are Amazon-authenticated and certificate-signed. They not only require the use of the AWS SDK, but also allow for the exclusive use of AWS Identity and Access Management users and roles to help facilitate access, trust, and permission policies. By creating a role and allowing an IAM user to access the WorkMail site, the IAM user gains full administrative visibility into the entire WorkMail organization (or as set in the IAM policy). This includes, but is not limited to, the ability to create, update, and delete users, groups, and resources. This allows developers to perform the scenarios listed above, as well as give users the ability to grant access on a selective basis using the IAM model\",\n loadSpec: \"aws/workmail\"\n }, {\n name: \"workmailmessageflow\",\n description: \"The WorkMail Message Flow API provides access to email messages as they are being sent and received by a WorkMail organization\",\n loadSpec: \"aws/workmailmessageflow\"\n }, {\n name: \"workspaces\",\n description: \"Amazon WorkSpaces Service Amazon WorkSpaces enables you to provision virtual, cloud-based Microsoft Windows and Amazon Linux desktops for your users\",\n loadSpec: \"aws/workspaces\"\n }, {\n name: \"xray\",\n description: \"AWS X-Ray provides APIs for managing debug traces and retrieving service maps and other data created by processing those traces\",\n loadSpec: \"aws/xray\"\n }, {\n name: \"s3api\",\n description: \"\",\n loadSpec: \"aws/s3api\"\n }, {\n name: \"s3\",\n description: 'This section explains prominent concepts and notations in the set of high-level S3 commands provided.\\n\\nPath Argument Type\\n++++++++++++++++++\\n\\nWhenever using a command, at least one path argument must be specified. There\\nare two types of path arguments: ``LocalPath`` and ``S3Uri``.\\n\\n``LocalPath``: represents the path of a local file or directory. It can be\\nwritten as an absolute path or relative path.\\n\\n``S3Uri``: represents the location of a S3 object, prefix, or bucket. This\\nmust be written in the form ``s3://mybucket/mykey`` where ``mybucket`` is\\nthe specified S3 bucket, ``mykey`` is the specified S3 key. The path argument\\nmust begin with ``s3://`` in order to denote that the path argument refers to\\na S3 object. Note that prefixes are separated by forward slashes. 
For\\nexample, if the S3 object ``myobject`` had the prefix ``myprefix``, the\\nS3 key would be ``myprefix/myobject``, and if the object was in the bucket\\n``mybucket``, the ``S3Uri`` would be ``s3://mybucket/myprefix/myobject``.\\n\\n``S3Uri`` also supports S3 access points. To specify an access point, this\\nvalue must be of the form ``s3:///``. For example if\\nthe access point ``myaccesspoint`` to be used has the ARN:\\n``arn:aws:s3:us-west-2:123456789012:accesspoint/myaccesspoint`` and the object\\nbeing accessed has the key ``mykey``, then the ``S3URI`` used must be:\\n``s3://arn:aws:s3:us-west-2:123456789012:accesspoint/myaccesspoint/mykey``.\\nSimilar to bucket names, you can also use prefixes with access point ARNs for\\nthe ``S3Uri``. For example:\\n``s3://arn:aws:s3:us-west-2:123456789012:accesspoint/myaccesspoint/myprefix/``\\n\\nThe higher level ``s3`` commands do **not** support access point object ARNs.\\nFor example, if the following was specified:\\n``s3://arn:aws:s3:us-west-2:123456789012:accesspoint/myaccesspoint/object/mykey``\\nthe ``S3URI`` will resolve to the object key ``object/mykey``\\n\\n\\n\\nOrder of Path Arguments\\n+++++++++++++++++++++++\\n\\nEvery command takes one or two positional path arguments. The first path\\nargument represents the source, which is the local file/directory or S3\\nobject/prefix/bucket that is being referenced. If there is a second path\\nargument, it represents the destination, which is the local file/directory\\nor S3 object/prefix/bucket that is being operated on. Commands with only\\none path argument do not have a destination because the operation is being\\nperformed only on the source.\\n\\n\\nSingle Local File and S3 Object Operations\\n++++++++++++++++++++++++++++++++++++++++++\\n\\nSome commands perform operations only on single files and S3 objects. The\\nfollowing commands are single file/object operations if no ``--recursive``\\nflag is provided.\\n\\n * ``cp``\\n * ``mv``\\n * ``rm``\\n\\nFor this type of operation, the first path argument, the source, must exist\\nand be a local file or S3 object. The second path argument, the destination,\\ncan be the name of a local file, local directory, S3 object, S3 prefix,\\nor S3 bucket.\\n\\nThe destination is indicated as a local directory, S3 prefix, or S3 bucket\\nif it ends with a forward slash or back slash. The use of slash depends\\non the path argument type. If the path argument is a ``LocalPath``,\\nthe type of slash is the separator used by the operating system. If the\\npath is a ``S3Uri``, the forward slash must always be used. If a slash\\nis at the end of the destination, the destination file or object will\\nadopt the name of the source file or object. Otherwise, if there is no\\nslash at the end, the file or object will be saved under the name provided.\\nSee examples in ``cp`` and ``mv`` to illustrate this description.\\n\\n\\nDirectory and S3 Prefix Operations\\n++++++++++++++++++++++++++++++++++\\n\\nSome commands only perform operations on the contents of a local directory\\nor S3 prefix/bucket. Adding or omitting a forward slash or back slash to\\nthe end of any path argument, depending on its type, does not affect the\\nresults of the operation. 
The following commands will always result in\\na directory or S3 prefix/bucket operation:\\n\\n* ``sync``\\n* ``mb``\\n* ``rb``\\n* ``ls``\\n\\n\\nUse of Exclude and Include Filters\\n++++++++++++++++++++++++++++++++++\\n\\nCurrently, there is no support for the use of UNIX style wildcards in\\na command\\'s path arguments. However, most commands have ``--exclude \"\"``\\nand ``--include \"\"`` parameters that can achieve the desired result.\\nThese parameters perform pattern matching to either exclude or include\\na particular file or object. The following pattern symbols are supported.\\n\\n * ``*``: Matches everything\\n * ``?``: Matches any single character\\n * ``[sequence]``: Matches any character in ``sequence``\\n * ``[!sequence]``: Matches any character not in ``sequence``\\n\\nAny number of these parameters can be passed to a command. You can do this by\\nproviding an ``--exclude`` or ``--include`` argument multiple times, e.g.\\n``--include \"*.txt\" --include \"*.png\"``.\\nWhen there are multiple filters, the rule is the filters that appear later in\\nthe command take precedence over filters that appear earlier in the command.\\nFor example, if the filter parameters passed to the command were\\n\\n::\\n\\n --exclude \"*\" --include \"*.txt\"\\n\\nAll files will be excluded from the command except for files ending with\\n``.txt`` However, if the order of the filter parameters was changed to\\n\\n::\\n\\n --include \"*.txt\" --exclude \"*\"\\n\\nAll files will be excluded from the command.\\n\\nEach filter is evaluated against the **source directory**. If the source\\nlocation is a file instead of a directory, the directory containing the file is\\nused as the source directory. For example, suppose you had the following\\ndirectory structure::\\n\\n /tmp/foo/\\n .git/\\n |---config\\n |---description\\n foo.txt\\n bar.txt\\n baz.jpg\\n\\nIn the command ``aws s3 sync /tmp/foo s3://bucket/`` the source directory is\\n``/tmp/foo``. Any include/exclude filters will be evaluated with the source\\ndirectory prepended. Below are several examples to demonstrate this.\\n\\nGiven the directory structure above and the command\\n``aws s3 cp /tmp/foo s3://bucket/ --recursive --exclude \".git/*\"``, the\\nfiles ``.git/config`` and ``.git/description`` will be excluded from the\\nfiles to upload because the exclude filter ``.git/*`` will have the source\\nprepended to the filter. This means that::\\n\\n /tmp/foo/.git/* -> /tmp/foo/.git/config (matches, should exclude)\\n /tmp/foo/.git/* -> /tmp/foo/.git/description (matches, should exclude)\\n /tmp/foo/.git/* -> /tmp/foo/foo.txt (does not match, should include)\\n /tmp/foo/.git/* -> /tmp/foo/bar.txt (does not match, should include)\\n /tmp/foo/.git/* -> /tmp/foo/baz.jpg (does not match, should include)\\n\\nThe command ``aws s3 cp /tmp/foo/ s3://bucket/ --recursive --exclude \"ba*\"``\\nwill exclude ``/tmp/foo/bar.txt`` and ``/tmp/foo/baz.jpg``::\\n\\n /tmp/foo/ba* -> /tmp/foo/.git/config (does not match, should include)\\n /tmp/foo/ba* -> /tmp/foo/.git/description (does not match, should include)\\n /tmp/foo/ba* -> /tmp/foo/foo.txt (does not match, should include)\\n /tmp/foo/ba* -> /tmp/foo/bar.txt (matches, should exclude)\\n /tmp/foo/ba* -> /tmp/foo/baz.jpg (matches, should exclude)\\n\\n\\nNote that, by default, *all files are included*. This means that\\nproviding **only** an ``--include`` filter will not change what\\nfiles are transferred. ``--include`` will only re-include files that\\nhave been excluded from an ``--exclude`` filter. 
If you only want\\nto upload files with a particular extension, you need to first exclude\\nall files, then re-include the files with the particular extension.\\nThis command will upload **only** files ending with ``.jpg``::\\n\\n aws s3 cp /tmp/foo/ s3://bucket/ --recursive --exclude \"*\" --include \"*.jpg\"\\n\\nIf you wanted to include both ``.jpg`` files as well as ``.txt`` files you\\ncan run::\\n\\n aws s3 cp /tmp/foo/ s3://bucket/ --recursive \\\\\\n --exclude \"*\" --include \"*.jpg\" --include \"*.txt\"\\n',\n loadSpec: \"aws/s3\"\n }, {\n name: \"deploy\",\n description: \"AWS CodeDeploy AWS CodeDeploy is a deployment service that automates application deployments to Amazon EC2 instances, on-premises instances running in your own facility, serverless AWS Lambda functions, or applications in an Amazon ECS service. You can deploy a nearly unlimited variety of application content, such as an updated Lambda function, updated applications in an Amazon ECS service, code, web and configuration files, executables, packages, scripts, multimedia files, and so on. AWS CodeDeploy can deploy application content stored in Amazon S3 buckets, GitHub repositories, or Bitbucket repositories. You do not need to make changes to your existing code before you can use AWS CodeDeploy. AWS CodeDeploy makes it easier for you to rapidly release new features, helps you avoid downtime during application deployment, and handles the complexity of updating your applications, without many of the risks associated with error-prone manual deployments. AWS CodeDeploy Components Use the information in this guide to help you work with the following AWS CodeDeploy components: Application: A name that uniquely identifies the application you want to deploy. AWS CodeDeploy uses this name, which functions as a container, to ensure the correct combination of revision, deployment configuration, and deployment group are referenced during a deployment. Deployment group: A set of individual instances, CodeDeploy Lambda deployment configuration settings, or an Amazon ECS service and network details. A Lambda deployment group specifies how to route traffic to a new version of a Lambda function. An Amazon ECS deployment group specifies the service created in Amazon ECS to deploy, a load balancer, and a listener to reroute production traffic to an updated containerized application. An EC2/On-premises deployment group contains individually tagged instances, Amazon EC2 instances in Amazon EC2 Auto Scaling groups, or both. All deployment groups can specify optional trigger, alarm, and rollback settings. Deployment configuration: A set of deployment rules and deployment success and failure conditions used by AWS CodeDeploy during a deployment. Deployment: The process and the components used when updating a Lambda function, a containerized application in an Amazon ECS service, or of installing content on one or more instances. Application revisions: For an AWS Lambda deployment, this is an AppSpec file that specifies the Lambda function to be updated and one or more functions to validate deployment lifecycle events. For an Amazon ECS deployment, this is an AppSpec file that specifies the Amazon ECS task definition, container, and port where production traffic is rerouted. For an EC2/On-premises deployment, this is an archive file that contains source content\\u2014source code, webpages, executable files, and deployment scripts\\u2014along with an AppSpec file. Revisions are stored in Amazon S3 buckets or GitHub repositories. 
For Amazon S3, a revision is uniquely identified by its Amazon S3 object key and its ETag, version, or both. For GitHub, a revision is uniquely identified by its commit ID. This guide also contains information to help you get details about the instances in your deployments, to make on-premises instances available for AWS CodeDeploy deployments, to get details about a Lambda function deployment, and to get details about Amazon ECS service deployments. AWS CodeDeploy Information Resources AWS CodeDeploy User Guide AWS CodeDeploy API Reference Guide AWS CLI Reference for AWS CodeDeploy AWS CodeDeploy Developer Forum\",\n loadSpec: \"aws/deploy\"\n }, {\n name: \"configservice\",\n description: \"AWS Config AWS Config provides a way to keep track of the configurations of all the AWS resources associated with your AWS account. You can use AWS Config to get the current and historical configurations of each AWS resource and also to get information about the relationship between the resources. An AWS resource can be an Amazon Compute Cloud (Amazon EC2) instance, an Elastic Block Store (EBS) volume, an elastic network Interface (ENI), or a security group. For a complete list of resources currently supported by AWS Config, see Supported AWS Resources. You can access and manage AWS Config through the AWS Management Console, the AWS Command Line Interface (AWS CLI), the AWS Config API, or the AWS SDKs for AWS Config. This reference guide contains documentation for the AWS Config API and the AWS CLI commands that you can use to manage AWS Config. The AWS Config API uses the Signature Version 4 protocol for signing requests. For more information about how to sign a request with this protocol, see Signature Version 4 Signing Process. For detailed information about AWS Config features and their associated actions or commands, as well as how to work with AWS Management Console, see What Is AWS Config in the AWS Config Developer Guide\",\n loadSpec: \"aws/configservice\"\n }, {\n name: \"opsworks-cm\",\n description: \"AWS OpsWorks CM AWS OpsWorks for configuration management (CM) is a service that runs and manages configuration management servers. You can use AWS OpsWorks CM to create and manage AWS OpsWorks for Chef Automate and AWS OpsWorks for Puppet Enterprise servers, and add or remove nodes for the servers to manage. Glossary of terms Server: A configuration management server that can be highly-available. The configuration management server runs on an Amazon Elastic Compute Cloud (EC2) instance, and may use various other AWS services, such as Amazon Relational Database Service (RDS) and Elastic Load Balancing. A server is a generic abstraction over the configuration manager that you want to use, much like Amazon RDS. In AWS OpsWorks CM, you do not start or stop servers. After you create servers, they continue to run until they are deleted. Engine: The engine is the specific configuration manager that you want to use. Valid values in this release include ChefAutomate and Puppet. Backup: This is an application-level backup of the data that the configuration manager stores. AWS OpsWorks CM creates an S3 bucket for backups when you launch the first server. A backup maintains a snapshot of a server's configuration-related attributes at the time the backup starts. Events: Events are always related to a server. Events are written during server creation, when health checks run, when backups are created, when system maintenance is performed, etc. When you delete a server, the server's events are also deleted. 
Account attributes: Every account has attributes that are assigned in the AWS OpsWorks CM database. These attributes store information about configuration limits (servers, backups, etc.) and your customer account. Endpoints AWS OpsWorks CM supports the following endpoints, all HTTPS. You must connect to one of the following endpoints. Your servers can only be accessed or managed within the endpoint in which they are created. opsworks-cm.us-east-1.amazonaws.com opsworks-cm.us-east-2.amazonaws.com opsworks-cm.us-west-1.amazonaws.com opsworks-cm.us-west-2.amazonaws.com opsworks-cm.ap-northeast-1.amazonaws.com opsworks-cm.ap-southeast-1.amazonaws.com opsworks-cm.ap-southeast-2.amazonaws.com opsworks-cm.eu-central-1.amazonaws.com opsworks-cm.eu-west-1.amazonaws.com For more information, see AWS OpsWorks endpoints and quotas in the AWS General Reference. Throttling limits All API operations allow for five requests per second with a burst of 10 requests per second\",\n loadSpec: \"aws/opsworks-cm\"\n }, {\n name: \"runtime.sagemaker\",\n description: \"The Amazon SageMaker runtime API\",\n loadSpec: \"aws/runtime.sagemaker\"\n }, {\n name: \"history\",\n description: \"Commands to interact with the history of AWS CLI commands ran over time. To record the history of AWS CLI commands set ``cli_history`` to ``enabled`` in the ``~/.aws/config`` file. This can be done by running:\\n\\n``$ aws configure set cli_history enabled``\",\n loadSpec: \"aws/history\"\n }]\n },\n s = n;\n\n\n//# sourceURL=webpack://SuggestionsBundle/./node_modules/@withfig/autocomplete/build/aws.js?");
+
+/***/ }),
+
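Each module in this bundle exports a Fig-style completion spec: a tree of subcommands, options, and args, where a `loadSpec` string (for example `loadSpec: "aws/s3"`) defers a subtree to another lazily loaded module, and a generator pairs a `script` to run in the shell with a `postProcess` function that turns its output into suggestions (the gem lister at the top of the next module is one such generator). The following is a minimal TypeScript sketch of how a runtime might walk such a spec; `loadSpecByName` and `runShellCommand` are assumed helpers for illustration, not part of this bundle.

// Minimal sketch of consuming a Fig-style spec as bundled here.
// `loadSpecByName` and `runShellCommand` are assumed to be provided
// by the surrounding runtime; they are not defined in this bundle.

interface Suggestion {
  name: string;
  description?: string;
  icon?: string;
}

interface Generator {
  script?: string[]; // e.g. ["bundle", "list", "--name-only"]
  postProcess?: (output: string) => Suggestion[];
}

interface Subcommand {
  name: string | string[];
  description?: string;
  loadSpec?: string; // e.g. "aws/s3": subtree resolved lazily
  subcommands?: Subcommand[];
  options?: Array<{ name: string | string[]; description?: string }>;
  args?: { name?: string; generators?: Generator; isOptional?: boolean };
}

// Descend one token into the spec tree, following `loadSpec`
// indirection when the subtree lives in another bundled module.
async function resolveSubcommand(
  spec: Subcommand,
  token: string,
  loadSpecByName: (name: string) => Promise<Subcommand>, // assumed helper
): Promise<Subcommand | undefined> {
  const match = (spec.subcommands ?? []).find((s) =>
    Array.isArray(s.name) ? s.name.includes(token) : s.name === token,
  );
  if (!match) {
    return undefined;
  }
  return match.loadSpec ? loadSpecByName(match.loadSpec) : match;
}

// Run a generator (such as the gem lister below) and post-process
// its stdout into suggestion objects.
async function runGenerator(
  gen: Generator,
  runShellCommand: (argv: string[]) => Promise<string>, // assumed helper
): Promise<Suggestion[]> {
  if (!gen.script || !gen.postProcess) {
    return [];
  }
  return gen.postProcess(await runShellCommand(gen.script));
}

Keeping the specs as plain data is what lets the runtime lazily load only the subtree the user is typing into, which matters when a single spec (like aws above) is this large.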
+/***/ "./node_modules/@withfig/autocomplete/build/bundle.js":
+/*!************************************************************!*\
+  !*** ./node_modules/@withfig/autocomplete/build/bundle.js ***!
+  \************************************************************/
+/***/ ((__unused_webpack___webpack_module__, __webpack_exports__, __webpack_require__) => {
+
+eval("__webpack_require__.r(__webpack_exports__);\n/* harmony export */ __webpack_require__.d(__webpack_exports__, {\n/* harmony export */ \"default\": () => (/* binding */ o)\n/* harmony export */ });\nvar e = {\n script: [\"bundle\", \"list\", \"--name-only\"],\n postProcess: t => t.split(\"\\n\").map(n => ({\n name: n,\n icon: \"\\u{1F4E6}\",\n description: \"Gem\"\n }))\n },\n i = {\n name: \"bundle\",\n description: \"Ruby Dependency Management\",\n subcommands: [{\n name: \"install\",\n description: \"Install the gems specified by the Gemfile or Gemfile.lock\",\n options: [{\n name: \"--binstubs\",\n args: {\n template: \"folders\"\n },\n description: \"Create binstubs in dir\"\n }, {\n name: \"--clean\",\n description: \"Remove unused gems after install\"\n }, {\n name: \"--deployment\",\n description: \"For Production and CI use\"\n }, {\n name: [\"--force\", \"--redownload\"],\n description: \"Redownload all gems\"\n }, {\n name: \"--frozen\",\n description: \"Do not allow lock file to update\"\n }, {\n name: \"--full-index\",\n description: \"Cache the full index locally\"\n }, {\n name: \"--gemfile\",\n args: {\n template: \"filepaths\"\n },\n description: \"The gemfile to use\"\n }, {\n name: \"--jobs\",\n args: {},\n description: \"Maximum number of parallel installs\"\n }, {\n name: \"--local\",\n description: \"Use only gems already downloaded or cached\"\n }, {\n name: \"--no-cache\",\n description: \"Do not use vendor/cache\"\n }, {\n name: \"--no-prune\",\n description: \"Do not remove stale gems\"\n }, {\n name: \"--path\",\n args: {\n template: \"folders\"\n },\n description: \"Path the install gems too\"\n }, {\n name: \"--quiet\",\n description: \"Do not print to stdout\"\n }, {\n name: \"--retry\",\n args: {},\n description: \"Retry failed network requests N times\"\n }, {\n name: \"--shebang\",\n args: {},\n description: \"Uses the specified ruby executable for binstubs\"\n }, {\n name: \"--standalone\",\n args: {},\n description: \"Makes a bundle that can work without depending on Rubygems or Bundler at runtime\"\n }, {\n name: \"--system\",\n description: \"Use system Rubygems location\"\n }, {\n name: \"--trust-policy\",\n args: {},\n description: \"Apply the Rubygems security policy\"\n }, {\n name: \"--with\",\n args: {},\n description: \"Groups to install\"\n }, {\n name: \"--without\",\n args: {},\n description: \"Groups to NOT install\"\n }]\n }, {\n name: \"update\",\n description: \"Update dependencies to their latest versions\",\n args: {\n name: \"gem\",\n generators: e,\n isOptional: !0\n },\n options: [{\n name: \"--all\",\n description: \"Update all gems specified in Gemfile\"\n }, {\n name: [\"--group\", \"-g\"],\n description: \"Only update the gems in the specified group\",\n args: {}\n }, {\n name: \"--source\",\n description: \"The name of a :git or :path source used in the Gemfile\",\n args: {}\n }, {\n name: \"--local\",\n description: \"Use only gems already downloaded or cached\"\n }, {\n name: \"--ruby\",\n description: \"Update the locked version of Ruby to the current version of Ruby\"\n }, {\n name: \"--bundler\",\n description: \"Update the locked version of bundler to the invoked bundler version\"\n }, {\n name: \"--full-index\",\n description: \"Fall back to using the single-file index of all gems\"\n }, {\n name: [\"--jobs\", \"-j\"],\n description: \"Specify the number of
jobs to run in parallel. The default is 1\",\n args: {}\n }, {\n name: \"--retry\",\n description: \"Retry failed network or git requests for number times\",\n args: {}\n }, {\n name: \"--quiet\",\n description: \"Only output warnings and errors\"\n }, {\n name: [\"--force\", \"--redownload\"],\n description: \"Force downloading every gem\"\n }, {\n name: \"--patch\",\n description: \"Prefer updating only to next patch version\"\n }, {\n name: \"--minor\",\n description: \"Prefer updating only to next minor version\"\n }, {\n name: \"--major\",\n description: \"Prefer updating to next major version (default)\"\n }, {\n name: \"--strict\",\n description: \"Do not allow any gem to be updated past latest --patch | --minor | --major\"\n }, {\n name: \"--conservative\",\n description: \"Do not allow shared dependencies to be updated\"\n }]\n }, {\n name: \"package\",\n description: \"Package the .gem files required by your application into the vendor/cache directory\"\n }, {\n name: \"exec\",\n description: \"Execute a command in the context of the bundle\",\n options: [{\n name: \"--keep-file-descriptors\",\n description: \"Pass all file descriptors to the new process\"\n }],\n args: {\n isCommand: !0\n }\n }, {\n name: \"config\",\n args: {}\n }, {\n name: \"help\"\n }, {\n name: \"add\",\n description: \"Add gem to the Gemfile and run bundle install\",\n args: {},\n options: [{\n name: [\"--version\", \"-v\"],\n description: \"Specify version requirements\"\n }, {\n name: [\"--group\", \"-g\"],\n description: \"Specify the group(s) for the added gem\"\n }, {\n name: [\"--source\", \"-s\"],\n description: \"Specify the source\"\n }, {\n name: \"--skip-install\",\n description: \"Adds the gem to the Gemfile but does not install it\"\n }, {\n name: \"--optimistic\",\n description: \"Adds optimistic declaration of version\"\n }, {\n name: \"--strict\",\n description: \"Adds strict declaration of version\"\n }]\n }, {\n name: \"binstubs\",\n description: \"Install the binstubs of the listed gems\",\n args: {},\n options: [{\n name: \"--force\",\n description: \"Overwrite existing binstubs\"\n }, {\n name: \"--path\",\n description: \"The location to install the specified binstubs to\"\n }, {\n name: \"--standalone\",\n description: \"Makes binstubs that can work without depending on Rubygems or Bundler at runtime\"\n }, {\n name: \"--shebang\",\n description: \"Specify a different shebang executable name than the default\"\n }]\n }, {\n name: \"check\",\n description: \"Determine whether the requirements for your application are installed and available to Bundler\",\n options: [{\n name: \"--dry-run\",\n description: \"Locks the Gemfile before running the command\"\n }, {\n name: \"--gemfile\",\n description: \"Use the specified gemfile instead of the Gemfile\"\n }, {\n name: \"--path\",\n description: \"Specify a different path than the system default\"\n }]\n }, {\n name: \"show\",\n description: \"Show the source location of a particular gem in the bundle\",\n args: {\n name: \"gem\",\n generators: e,\n isOptional: !0\n },\n options: [{\n name: \"--paths\",\n description: \"List the paths of all gems that are required by your Gemfile\"\n }]\n }, {\n name: \"outdated\",\n description: \"Show all of the outdated gems in the current bundle\",\n options: [{\n name: \"--local\",\n description: \"Do not attempt to fetch gems remotely and use the gem cache instead\"\n }, {\n name: \"--pre\",\n description: \"Check for newer pre-release gems\"\n }, {\n name: \"--source\",\n description: \"Check 
against a specific source\"\n }, {\n name: \"--strict\",\n description: \"Only list newer versions allowed by your Gemfile requirements\"\n }, {\n name: [\"--parseable\", \"--porcelain\"],\n description: \"Use minimal formatting for more parseable output\"\n }, {\n name: \"--group\",\n description: \"List gems from a specific group\"\n }, {\n name: \"--groups\",\n description: \"List gems organized by groups\"\n }, {\n name: \"--update-strict\",\n description: \"Strict conservative resolution, do not allow any gem to be updated past latest --patch | --minor| --major\"\n }, {\n name: \"--minor\",\n description: \"Prefer updating only to next minor version\"\n }, {\n name: \"--major\",\n description: \"Prefer updating to next major version (default)\"\n }, {\n name: \"--patch\",\n description: \"Prefer updating only to next patch version\"\n }, {\n name: \"--filter-major\",\n description: \"Only list major newer versions\"\n }, {\n name: \"--filter-minor\",\n description: \"Only list minor newer versions\"\n }, {\n name: \"--filter-patch\",\n description: \"Only list patch newer versions\"\n }, {\n name: \"--only-explicit\",\n description: \"Only list gems specified in your Gemfile, not their dependencies\"\n }]\n }, {\n name: \"console\",\n description: \"Start an IRB session in the current bundle\"\n }, {\n name: \"open\",\n description: \"Open an installed gem in the editor\",\n args: {\n name: \"gem\",\n generators: e\n }\n }, {\n name: \"lock\",\n description: \"Generate a lockfile for your dependencies\",\n options: [{\n name: \"--update\",\n description: \"Ignores the existing lockfile\",\n args: {}\n }, {\n name: \"--local\",\n description: \"Do not attempt to connect to rubygems.org\"\n }, {\n name: \"--print\",\n description: \"Prints the lockfile to STDOUT instead of writing to the file\\n system\"\n }, {\n name: \"--lockfile\",\n description: \"The path where the lockfile should be written to\",\n args: {\n name: \"path\"\n }\n }, {\n name: \"--full-index\",\n description: \"Fall back to using the single-file index of all gems\"\n }, {\n name: \"--add-platform\",\n description: \"Add a new platform to the lockfile, re-resolving for the addi-\\n tion of that platform\"\n }, {\n name: \"--remove-platform\",\n description: \"Remove a platform from the lockfile\"\n }, {\n name: \"--patch\",\n description: \"If updating, prefer updating only to next patch version\"\n }, {\n name: \"--minor\",\n description: \"If updating, prefer updating only to next minor version\"\n }, {\n name: \"--major\",\n description: \"If updating, prefer updating to next major version (default)\"\n }, {\n name: \"--strict\",\n description: \"If updating, do not allow any gem to be updated past latest --patch | --minor | --major\"\n }, {\n name: \"--conservative\",\n description: \"If updating, use bundle install conservative update behavior and do not allow shared dependencies to be updated\"\n }]\n }, {\n name: \"viz\",\n description: \"Generate a visual representation of your dependencies\",\n options: [{\n name: [\"--file\", \"-f\"],\n description: \"The name to use for the generated file. 
See --format option\"\n }, {\n name: [\"--format\", \"-F\"],\n description: \"This is output format option\"\n }, {\n name: [\"--requirements\", \"-R\"],\n description: \"Set to show the version of each required dependency\"\n }, {\n name: [\"--version\", \"-v\"],\n description: \"Set to show each gem version\"\n }, {\n name: [\"--without\", \"-W\"],\n description: \"Exclude gems that are part of the specified named group\"\n }]\n }, {\n name: \"init\",\n description: \"Generate a simple Gemfile, placed in the current directory\",\n options: [{\n name: \"--gemspec\",\n description: \"Use the specified .gemspec to create the Gemfile\"\n }]\n }, {\n name: \"gem\",\n description: \"Create a simple gem, suitable for development with Bundler\",\n options: [{\n name: [\"--exe\", \"-b\", \"--bin\"],\n description: \"Specify that Bundler should create a binary executable\"\n }, {\n name: \"--no-exe\",\n description: \"Do not create a binary\"\n }, {\n name: \"--coc\",\n description: \"Add a CODE_OF_CONDUCT.md file to the root of the generated project\"\n }, {\n name: \"--no-coc\",\n description: \"Do not create a CODE_OF_CONDUCT.md\"\n }, {\n name: \"--ext\",\n description: \"Add boilerplate for C extension code to the generated project\"\n }, {\n name: \"--no-ext\",\n description: \"Do not add C extension code\"\n }, {\n name: \"--mit\",\n description: \"Add an MIT license\"\n }, {\n name: \"--no-mit\",\n description: \"Do not create a LICENSE.txt\"\n }, {\n name: [\"-t\", \"--test\"],\n description: \"Specify the test framework that Bundler should use\",\n args: {}\n }, {\n name: [\"-e\", \"--edit\"],\n description: \"Open the resulting gemspec in EDITOR\",\n args: {}\n }]\n }, {\n name: \"platform\",\n description: \"Display platform compatibility information\",\n options: [{\n name: \"--ruby\",\n description: \"It will display the ruby directive information so you don't have to parse it from the Gemfile\"\n }]\n }, {\n name: \"clean\",\n description: \"Clean up unused gems in your Bundler directory\",\n options: [{\n name: \"--dry-run\",\n description: \"Print the changes, but do not clean the unused gems\"\n }, {\n name: \"--force\",\n description: \"Force a clean even if --path is not set\"\n }]\n }, {\n name: \"doctor\",\n description: \"Display warnings about common problems\",\n options: [{\n name: \"--quiet\",\n description: \"Only output warnings and errors\"\n }, {\n name: \"--gemfile\",\n description: \"The location of the Gemfile which Bundler should use\",\n args: {}\n }]\n }],\n options: [{\n name: \"--no-color\",\n description: \"Print all output without color\"\n }, {\n name: [\"--retry\", \"-r\"],\n description: \"Specify the number of times you wish to attempt network commands\"\n }, {\n name: [\"--verbose\", \"-V\"],\n description: \"Print out additional logging information\"\n }]\n },\n o = i;\n\n\n//# sourceURL=webpack://SuggestionsBundle/./node_modules/@withfig/autocomplete/build/bundle.js?"); + +/***/ }), + +/***/ "./node_modules/@withfig/autocomplete/build/cargo.js": +/*!***********************************************************!*\ + !*** ./node_modules/@withfig/autocomplete/build/cargo.js ***! 
+ \***********************************************************/ +/***/ ((__unused_webpack___webpack_module__, __webpack_exports__, __webpack_require__) => { + +eval("__webpack_require__.r(__webpack_exports__);\n/* harmony export */ __webpack_require__.d(__webpack_exports__, {\n/* harmony export */ \"default\": () => (/* binding */ Me)\n/* harmony export */ });\nfunction ownKeys(e, r) { var t = Object.keys(e); if (Object.getOwnPropertySymbols) { var o = Object.getOwnPropertySymbols(e); r && (o = o.filter(function (r) { return Object.getOwnPropertyDescriptor(e, r).enumerable; })), t.push.apply(t, o); } return t; }\nfunction _objectSpread(e) { for (var r = 1; r < arguments.length; r++) { var t = null != arguments[r] ? arguments[r] : {}; r % 2 ? ownKeys(Object(t), !0).forEach(function (r) { _defineProperty(e, r, t[r]); }) : Object.getOwnPropertyDescriptors ? Object.defineProperties(e, Object.getOwnPropertyDescriptors(t)) : ownKeys(Object(t)).forEach(function (r) { Object.defineProperty(e, r, Object.getOwnPropertyDescriptor(t, r)); }); } return e; }\nfunction _defineProperty(obj, key, value) { key = _toPropertyKey(key); if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; }\nfunction _toPropertyKey(t) { var i = _toPrimitive(t, \"string\"); return \"symbol\" == typeof i ? i : String(i); }\nfunction _toPrimitive(t, r) { if (\"object\" != typeof t || !t) return t; var e = t[Symbol.toPrimitive]; if (void 0 !== e) { var i = e.call(t, r || \"default\"); if (\"object\" != typeof i) return i; throw new TypeError(\"@@toPrimitive must return a primitive value.\"); } return (\"string\" === r ? String : Number)(t); }\nfunction asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } }\nfunction _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, \"next\", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, \"throw\", err); } _next(undefined); }); }; }\nvar de = Object.create;\nvar K = Object.defineProperty;\nvar ue = Object.getOwnPropertyDescriptor;\nvar me = Object.getOwnPropertyNames;\nvar ge = Object.getPrototypeOf,\n fe = Object.prototype.hasOwnProperty;\nvar E = (e, t) => () => (t || e((t = {\n exports: {}\n}).exports, t), t.exports);\nvar he = (e, t, a, i) => {\n if (t && typeof t == \"object\" || typeof t == \"function\") {\n var _loop = function _loop(n) {\n !fe.call(e, n) && n !== a && K(e, n, {\n get: () => t[n],\n enumerable: !(i = ue(t, n)) || i.enumerable\n });\n };\n for (var n of me(t)) {\n _loop(n);\n }\n }\n return e;\n};\nvar be = (e, t, a) => (a = e != null ? de(ge(e)) : {}, he(t || !e || !e.__esModule ? K(a, \"default\", {\n value: e,\n enumerable: !0\n}) : a, e));\nvar Y = E(T => {\n \"use strict\";\n\n Object.defineProperty(T, \"__esModule\", {\n value: !0\n });\n T.shellExpand = T.ensureTrailingSlash = void 0;\n var ve = e => e.endsWith(\"/\") ? e : \"\".concat(e, \"/\");\n T.ensureTrailingSlash = ve;\n var ye = (e, t) => e.startsWith(\"~\") && (e.length === 1 || e.charAt(1) === \"/\") ? 
e.replace(\"~\", t) : e,\n ke = (e, t) => e.replace(/\\$([A-Za-z0-9_]+)/g, n => {\n var o;\n var s = n.slice(1);\n return (o = t[s]) !== null && o !== void 0 ? o : n;\n }).replace(/\\$\\{([A-Za-z0-9_]+)(?::-([^}]+))?\\}/g, (n, o, s) => {\n var l, r;\n return (r = (l = t[o]) !== null && l !== void 0 ? l : s) !== null && r !== void 0 ? r : n;\n }),\n we = (e, t) => {\n var a;\n var {\n environmentVariables: i\n } = t;\n return ke(ye(e, (a = i === null || i === void 0 ? void 0 : i.HOME) !== null && a !== void 0 ? a : \"~\"), i);\n };\n T.shellExpand = we;\n});\nvar X = E(C => {\n \"use strict\";\n\n var Re = C && C.__awaiter || function (e, t, a, i) {\n function n(o) {\n return o instanceof a ? o : new a(function (s) {\n s(o);\n });\n }\n return new (a || (a = Promise))(function (o, s) {\n function l(c) {\n try {\n p(i.next(c));\n } catch (g) {\n s(g);\n }\n }\n function r(c) {\n try {\n p(i.throw(c));\n } catch (g) {\n s(g);\n }\n }\n function p(c) {\n c.done ? o(c.value) : n(c.value).then(l, r);\n }\n p((i = i.apply(e, t || [])).next());\n });\n };\n Object.defineProperty(C, \"__esModule\", {\n value: !0\n });\n C.filepaths = C.folders = C.getCurrentInsertedDirectory = C.sortFilesAlphabetically = void 0;\n var Z = Y();\n function Q(e) {\n var t = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : [];\n var a = t.map(n => n.toLowerCase()),\n i = e.filter(n => !a.includes(n.toLowerCase()));\n return [...i.filter(n => !n.startsWith(\".\")).sort((n, o) => n.localeCompare(o)), ...i.filter(n => n.startsWith(\".\")).sort((n, o) => n.localeCompare(o)), \"../\"];\n }\n C.sortFilesAlphabetically = Q;\n var Ce = (e, t, a) => {\n if (e === null) return \"/\";\n var i = (0, Z.shellExpand)(t, a),\n n = i.slice(0, i.lastIndexOf(\"/\") + 1);\n return n === \"\" ? (0, Z.ensureTrailingSlash)(e) : n.startsWith(\"/\") ? n : \"\".concat((0, Z.ensureTrailingSlash)(e)).concat(n);\n };\n C.getCurrentInsertedDirectory = Ce;\n function N() {\n var e = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n var {\n extensions: t = [],\n equals: a = [],\n matches: i,\n filterFolders: n = !1,\n editFileSuggestions: o,\n editFolderSuggestions: s,\n rootDirectory: l,\n showFolders: r = \"always\"\n } = e,\n p = new Set(t),\n c = new Set(a),\n g = () => t.length > 0 || a.length > 0 || i,\n R = function R() {\n var v = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : [];\n return g() ? v.filter(_ref => {\n var {\n name: k = \"\",\n type: x\n } = _ref;\n if (!n && x === \"folder\" || c.has(k) || i && k.match(i)) return !0;\n var [, ...h] = k.split(\".\");\n if (h.length >= 1) {\n var S = h.length - 1,\n q = h[S];\n do {\n if (p.has(q)) return !0;\n S -= 1, q = [h[S], q].join(\".\");\n } while (S >= 0);\n }\n return !1;\n }) : v;\n },\n P = function P() {\n var v = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : [];\n return !o && !s ? v : v.map(k => Object.assign(Object.assign({}, k), (k.type === \"file\" ? o : s) || {}));\n };\n return {\n trigger: (v, k) => {\n var x = v.lastIndexOf(\"/\"),\n h = k.lastIndexOf(\"/\");\n return x !== h ? !0 : x === -1 && h === -1 ? !1 : v.slice(0, x) !== k.slice(0, h);\n },\n getQueryTerm: v => v.slice(v.lastIndexOf(\"/\") + 1),\n custom: (v, k, x) => Re(this, void 0, void 0, function* () {\n var h;\n var {\n isDangerous: S,\n currentWorkingDirectory: q,\n searchTerm: y\n } = x,\n O = (h = (0, C.getCurrentInsertedDirectory)(l !== null && l !== void 0 ? l : q, y, x)) !== null && h !== void 0 ? 
h : \"/\";\n try {\n var $ = yield k({\n command: \"ls\",\n args: [\"-1ApL\"],\n cwd: O\n }),\n pe = Q($.stdout.split(\"\\n\"), [\".DS_Store\"]),\n J = [];\n for (var j of pe) if (j) {\n var I = j.endsWith(\"/\") ? \"folders\" : \"filepaths\";\n (I === \"filepaths\" && r !== \"only\" || I === \"folders\" && r !== \"never\") && J.push({\n type: I === \"filepaths\" ? \"file\" : \"folder\",\n name: j,\n insertValue: j,\n isDangerous: S,\n context: {\n templateType: I\n }\n });\n }\n return P(R(J));\n } catch (_unused) {\n return [];\n }\n })\n };\n }\n C.folders = Object.assign(() => N({\n showFolders: \"only\"\n }), Object.freeze(N({\n showFolders: \"only\"\n })));\n C.filepaths = Object.assign(N, Object.freeze(N()));\n});\nvar ie = E(D => {\n \"use strict\";\n\n var A = D && D.__awaiter || function (e, t, a, i) {\n function n(o) {\n return o instanceof a ? o : new a(function (s) {\n s(o);\n });\n }\n return new (a || (a = Promise))(function (o, s) {\n function l(c) {\n try {\n p(i.next(c));\n } catch (g) {\n s(g);\n }\n }\n function r(c) {\n try {\n p(i.throw(c));\n } catch (g) {\n s(g);\n }\n }\n function p(c) {\n c.done ? o(c.value) : n(c.value).then(l, r);\n }\n p((i = i.apply(e, t || [])).next());\n });\n };\n Object.defineProperty(D, \"__esModule\", {\n value: !0\n });\n D.keyValueList = D.keyValue = D.valueList = void 0;\n var ee = new Map();\n function _(e, t) {\n return e.length === 0 ? t : t.map(a => a.insertValue ? a : Object.assign(Object.assign({}, a), {\n insertValue: a.name + e\n }));\n }\n function te(e, t, a) {\n return A(this, void 0, void 0, function* () {\n if (typeof e == \"function\") {\n var i = yield e(...a);\n return _(t, i);\n }\n if (typeof e[0] == \"string\") {\n var _i = e.map(n => ({\n name: n\n }));\n return _(t, _i);\n }\n return _(t, e);\n });\n }\n function M(e, t, a, i) {\n return A(this, void 0, void 0, function* () {\n if (a || Array.isArray(e)) {\n var n = ee.get(e);\n return n === void 0 && (n = yield te(e, t, i), ee.set(e, n)), n;\n }\n return te(e, t, i);\n });\n }\n function ae(e, t) {\n return typeof t == \"string\" ? e && t === \"keys\" || !e && t === \"values\" : t;\n }\n function B(e) {\n for (var _len = arguments.length, t = new Array(_len > 1 ? _len - 1 : 0), _key = 1; _key < _len; _key++) {\n t[_key - 1] = arguments[_key];\n }\n return Math.max(...t.map(a => e.lastIndexOf(a)));\n }\n function L(e, t) {\n var a = new Set(e);\n return t.filter(i => {\n var n;\n return typeof i.name == \"string\" ? !a.has(i.name) : !(!((n = i.name) === null || n === void 0) && n.some(o => a.has(o)));\n });\n }\n function ze(_ref2) {\n var _this = this;\n var {\n delimiter: e = \",\",\n values: t = [],\n cache: a = !1,\n insertDelimiter: i = !1,\n allowRepeatedValues: n = !1\n } = _ref2;\n return {\n trigger: (o, s) => o.lastIndexOf(e) !== s.lastIndexOf(e),\n getQueryTerm: o => o.slice(o.lastIndexOf(e) + e.length),\n custom: function custom() {\n for (var _len2 = arguments.length, o = new Array(_len2), _key2 = 0; _key2 < _len2; _key2++) {\n o[_key2] = arguments[_key2];\n }\n return A(_this, void 0, void 0, function* () {\n var s;\n var l = yield M(t, i ? e : \"\", a, o);\n if (n) return l;\n var [r] = o,\n p = (s = r[r.length - 1]) === null || s === void 0 ? 
void 0 : s.split(e);\n return L(p, l);\n });\n }\n };\n }\n D.valueList = ze;\n function Se(_ref3) {\n var _this2 = this;\n var {\n separator: e = \"=\",\n keys: t = [],\n values: a = [],\n cache: i = !1,\n insertSeparator: n = !0\n } = _ref3;\n return {\n trigger: (o, s) => o.indexOf(e) !== s.indexOf(e),\n getQueryTerm: o => o.slice(o.indexOf(e) + 1),\n custom: function custom() {\n for (var _len3 = arguments.length, o = new Array(_len3), _key3 = 0; _key3 < _len3; _key3++) {\n o[_key3] = arguments[_key3];\n }\n return A(_this2, void 0, void 0, function* () {\n var [s] = o,\n r = !s[s.length - 1].includes(e),\n p = r ? t : a,\n c = ae(r, i);\n return M(p, r && n ? e : \"\", c, o);\n });\n }\n };\n }\n D.keyValue = Se;\n function xe(_ref4) {\n var _this3 = this;\n var {\n separator: e = \"=\",\n delimiter: t = \",\",\n keys: a = [],\n values: i = [],\n cache: n = !1,\n insertSeparator: o = !0,\n insertDelimiter: s = !1,\n allowRepeatedKeys: l = !1,\n allowRepeatedValues: r = !0\n } = _ref4;\n return {\n trigger: (p, c) => {\n var g = B(p, e, t),\n R = B(c, e, t);\n return g !== R;\n },\n getQueryTerm: p => {\n var c = B(p, e, t);\n return p.slice(c + 1);\n },\n custom: function custom() {\n for (var _len4 = arguments.length, p = new Array(_len4), _key4 = 0; _key4 < _len4; _key4++) {\n p[_key4] = arguments[_key4];\n }\n return A(_this3, void 0, void 0, function* () {\n var [c] = p,\n g = c[c.length - 1],\n R = B(g, e, t),\n P = R === -1 || g.slice(R, R + e.length) !== e,\n v = P ? a : i,\n k = ae(P, n),\n h = yield M(v, P ? o ? e : \"\" : s ? t : \"\", k, p);\n if (P) {\n if (l) return h;\n var q = g.split(t).map(y => y.slice(0, y.indexOf(e)));\n return L(q, h);\n }\n if (r) return h;\n var S = g.split(t).map(q => q.slice(q.indexOf(e) + e.length));\n return L(S, h);\n });\n }\n };\n }\n D.keyValueList = xe;\n});\nvar ne = E(U => {\n \"use strict\";\n\n var qe = U && U.__awaiter || function (e, t, a, i) {\n function n(o) {\n return o instanceof a ? o : new a(function (s) {\n s(o);\n });\n }\n return new (a || (a = Promise))(function (o, s) {\n function l(c) {\n try {\n p(i.next(c));\n } catch (g) {\n s(g);\n }\n }\n function r(c) {\n try {\n p(i.throw(c));\n } catch (g) {\n s(g);\n }\n }\n function p(c) {\n c.done ? o(c.value) : n(c.value).then(l, r);\n }\n p((i = i.apply(e, t || [])).next());\n });\n };\n Object.defineProperty(U, \"__esModule\", {\n value: !0\n });\n U.ai = void 0;\n var Pe = 4097,\n Oe = 4,\n De = .8,\n Te = Pe * Oe * De;\n function Ue(_ref5) {\n var {\n name: e,\n prompt: t,\n message: a,\n postProcess: i,\n temperature: n,\n splitOn: o\n } = _ref5;\n return {\n scriptTimeout: 15e3,\n custom: (s, l, r) => qe(this, void 0, void 0, function* () {\n var p, c;\n var g = yield l({\n command: \"fig\",\n args: [\"settings\", \"--format\", \"json\", \"autocomplete.ai.enabled\"]\n });\n if (!JSON.parse(g.stdout)) return [];\n var R = typeof t == \"function\" ? yield t({\n tokens: s,\n executeCommand: l,\n generatorContext: r\n }) : t,\n P = typeof a == \"function\" ? yield a({\n tokens: s,\n executeCommand: l,\n generatorContext: r\n }) : a;\n if (P === null || P.length === 0) return console.warn(\"No message provided to AI generator\"), [];\n var v = Te - ((p = R === null || R === void 0 ? void 0 : R.length) !== null && p !== void 0 ? p : 0),\n k = {\n model: \"gpt-3.5-turbo\",\n source: \"autocomplete\",\n name: e,\n messages: [...(R ? 
[{\n role: \"system\",\n content: R\n }] : []), {\n role: \"user\",\n content: P.slice(0, v)\n }],\n temperature: n\n },\n x = JSON.stringify(k),\n h = yield l({\n command: \"fig\",\n args: [\"_\", \"request\", \"--route\", \"/ai/chat\", \"--method\", \"POST\", \"--body\", x]\n }),\n S = JSON.parse(h.stdout);\n return (c = S === null || S === void 0 ? void 0 : S.choices.map(y => {\n var O;\n return (O = y === null || y === void 0 ? void 0 : y.message) === null || O === void 0 ? void 0 : O.content;\n }).filter(y => typeof y == \"string\").flatMap(y => o ? y.split(o).filter(O => O.trim().length > 0) : [y]).map(y => {\n if (i) return i(y);\n var O = y.trim().replace(/\\n/g, \" \");\n return {\n icon: \"\\u{1FA84}\",\n name: O,\n insertValue: \"'\".concat(O, \"'\"),\n description: \"Generated by Fig AI\"\n };\n })) !== null && c !== void 0 ? c : [];\n })\n };\n }\n U.ai = Ue;\n});\nvar oe = E(z => {\n \"use strict\";\n\n var Ve = z && z.__createBinding || (Object.create ? function (e, t, a, i) {\n i === void 0 && (i = a);\n var n = Object.getOwnPropertyDescriptor(t, a);\n (!n || (\"get\" in n ? !t.__esModule : n.writable || n.configurable)) && (n = {\n enumerable: !0,\n get: function get() {\n return t[a];\n }\n }), Object.defineProperty(e, i, n);\n } : function (e, t, a, i) {\n i === void 0 && (i = a), e[i] = t[a];\n }),\n Ee = z && z.__exportStar || function (e, t) {\n for (var a in e) a !== \"default\" && !Object.prototype.hasOwnProperty.call(t, a) && Ve(t, e, a);\n };\n Object.defineProperty(z, \"__esModule\", {\n value: !0\n });\n z.ai = z.folders = z.filepaths = void 0;\n var re = X();\n Object.defineProperty(z, \"filepaths\", {\n enumerable: !0,\n get: function get() {\n return re.filepaths;\n }\n });\n Object.defineProperty(z, \"folders\", {\n enumerable: !0,\n get: function get() {\n return re.folders;\n }\n });\n Ee(ie(), z);\n var Ae = ne();\n Object.defineProperty(z, \"ai\", {\n enumerable: !0,\n get: function get() {\n return Ae.ai;\n }\n });\n});\nvar m = be(oe(), 1),\n G = [{\n name: \"2015\",\n description: \"2015 edition\"\n }, {\n name: \"2018\",\n description: \"2018 edition\"\n }, {\n name: \"2021\",\n description: \"2021 edition\"\n }],\n W = [{\n name: \"git\",\n icon: \"fig://icon?type=git\",\n description: \"Initialize with Git\"\n }, {\n name: \"hg\",\n icon: \"\\u2697\\uFE0F\",\n description: \"Initialize with Mercurial\"\n }, {\n name: \"pijul\",\n icon: \"\\u{1F99C}\",\n description: \"Initialize with Pijul\"\n }, {\n name: \"fossil\",\n icon: \"\\u{1F9B4}\",\n description: \"Initialize with Fossil\"\n }, {\n name: \"none\",\n icon: \"\\u{1F6AB}\",\n description: \"Initialize with no VCS\"\n }],\n le = e => {\n var t = \"\".concat(e.workspace_root, \"/Cargo.toml\");\n console.log(t);\n var a = e.packages.find(i => i.source === t);\n return a ? [a] : e.packages.filter(i => !i.source);\n },\n f = {\n script: [\"cargo\", \"metadata\", \"--format-version\", \"1\", \"--no-deps\"],\n postProcess: e => JSON.parse(e).packages.map(a => ({\n icon: \"\\u{1F4E6}\",\n name: a.name,\n description: \"\".concat(a.version).concat(a.description ? 
\" - \".concat(a.description) : \"\")\n }))\n },\n Fe = {\n script: [\"cargo\", \"metadata\", \"--format-version\", \"1\"],\n postProcess: e => {\n var t = JSON.parse(e),\n i = le(t).flatMap(n => n.dependencies).map(n => ({\n name: n.name,\n description: n.req\n }));\n return [...new Map(i.map(n => [n.name, n])).values()];\n }\n },\n u = _ref6 => {\n var {\n kind: e\n } = _ref6;\n return {\n custom: function () {\n var _custom = _asyncToGenerator(function* (t, a, i) {\n var {\n stdout: n\n } = yield a({\n command: \"cargo\",\n args: [\"metadata\", \"--format-version\", \"1\", \"--no-deps\"]\n }),\n o = JSON.parse(n),\n l = le(o).flatMap(r => r.targets);\n return e && (l = l.filter(r => r.kind.includes(e))), l.map(r => {\n var p = r.src_path.replace(i.currentWorkingDirectory, \"\");\n return {\n icon: \"\\u{1F3AF}\",\n name: r.name,\n description: p\n };\n });\n });\n function custom(_x, _x2, _x3) {\n return _custom.apply(this, arguments);\n }\n return custom;\n }()\n };\n },\n F = {\n script: [\"cargo\", \"metadata\", \"--format-version\", \"1\"],\n postProcess: e => JSON.parse(e).packages.map(a => ({\n name: a.name,\n description: a.description\n }))\n },\n w = {\n script: [\"cargo\", \"read-manifest\"],\n postProcess: e => {\n var t = JSON.parse(e);\n return Object.keys(t.features || {}).map(a => ({\n icon: \"\\u{1F39A}\",\n name: a,\n description: \"Features: [\".concat(t.features[a].join(\", \"), \"]\")\n }));\n }\n },\n se = {\n custom: function () {\n var _custom2 = _asyncToGenerator(function* (e, t) {\n var a = \"Makefile.toml\",\n i = e.findIndex(p => p === \"--makefile\");\n i !== -1 && e.length > i + 1 && (a = e[i + 1]);\n var n = [a],\n {\n stdout: o\n } = yield t({\n command: \"cat\",\n args: n\n }),\n s = /\\[tasks\\.([^\\]]+)\\]/g,\n l,\n r = [];\n for (; (l = s.exec(o)) !== null;) r.push({\n name: l[1]\n });\n return r;\n });\n function custom(_x4, _x5) {\n return _custom2.apply(this, arguments);\n }\n return custom;\n }()\n },\n H = {\n custom: function () {\n var _custom3 = _asyncToGenerator(function* (e, t) {\n var a = new Intl.NumberFormat(void 0, {\n notation: \"compact\",\n compactDisplay: \"short\",\n maximumSignificantDigits: 3\n }),\n i = e[e.length - 1];\n if (i.includes(\"@\") && !i.startsWith(\"@\")) {\n var [n, o] = i.split(\"@\"),\n s = encodeURIComponent(n),\n {\n stdout: l\n } = yield t({\n command: \"curl\",\n args: [\"-sfL\", \"https://crates.io/api/v1/crates/\".concat(s, \"/versions\")]\n });\n return JSON.parse(l).versions.map(p => ({\n name: \"\".concat(n, \"@\").concat(p.num),\n insertValue: \"\".concat(p.num),\n description: \"\".concat(a.format(p.downloads), \" downloads - \").concat(new Date(p.created_at).toLocaleDateString()),\n hidden: p.yanked\n }));\n } else if (i.length > 0) {\n var _n = encodeURIComponent(i),\n [{\n stdout: _o\n }, {\n stdout: _s\n }] = yield Promise.all([t({\n command: \"curl\",\n args: [\"-sfL\", \"https://crates.io/api/v1/crates?q=\".concat(_n, \"&per_page=60\")]\n }), t({\n command: \"cargo\",\n args: [\"metadata\", \"--format-version\", \"1\", \"--no-deps\"]\n })]),\n r = JSON.parse(_o).crates.sort((c, g) => g.recent_downloads - c.recent_downloads).map(c => ({\n icon: \"\\u{1F4E6}\",\n displayName: \"\".concat(c.name, \"@\").concat(c.newest_version),\n name: c.name,\n description: \"\".concat(a.format(c.recent_downloads)).concat(c.description ? 
\" - \".concat(c.description) : \"\")\n })),\n p = [];\n return _s.trim().length > 0 && (p = JSON.parse(_s).packages.filter(g => !g.source).map(g => ({\n icon: \"\\u{1F4E6}\",\n displayName: \"\".concat(g.name, \"@\").concat(g.version),\n name: g.name,\n description: \"Local Crate \".concat(g.version).concat(g.description ? \" - \".concat(g.description) : \"\")\n }))), r.concat(p);\n } else return [];\n });\n function custom(_x6, _x7) {\n return _custom3.apply(this, arguments);\n }\n return custom;\n }(),\n trigger: (e, t) => {\n var a = e.indexOf(\"@\"),\n i = t.indexOf(\"@\");\n return a === -1 && i === -1 || a !== i;\n },\n getQueryTerm: \"@\"\n },\n b = {\n script: [\"rustc\", \"--print\", \"target-list\"],\n postProcess: e => e.split(\"\\n\").filter(t => t.trim() !== \"\").map(t => ({\n name: t\n }))\n },\n V = [{\n name: \"true\"\n }, {\n name: \"false\"\n }],\n ce = {\n \"build.jobs\": {\n description: \"Sets the maximum number of compiler processes to run in parallel\"\n },\n \"build.rustc\": {\n description: \"Path to the rustc compiler\"\n },\n \"build.rustc-wrapper\": {\n description: \"Sets a wrapper to execute instead of rustc\"\n },\n \"build.target\": {\n description: \"The default target platform triples to compile to\"\n },\n \"build.target-dir\": {\n description: \"The path to where all compiler output is placed\"\n },\n \"build.rustflags\": {\n description: \"Extra command-line flags to pass to rustc\"\n },\n \"build.rustdocflags\": {\n description: \"Extra command-line flags to pass to rustdoc\"\n },\n \"build.incremental\": {\n description: \"Whether or not to perform incremental compilation\",\n tomlSuggestions: V\n },\n \"build.dep-info-basedir\": {\n description: \"Strips the given path prefix from dep info file paths\"\n },\n \"doc.browser\": {\n description: \"This option sets the browser to be used by cargo doc, overriding the BROWSER environment variable when opening documentation with the --open option\"\n },\n \"cargo-new.vcs\": {\n description: \"Specifies the source control system to use for initializing a new repository\",\n tomlSuggestions: W.map(e => _objectSpread(_objectSpread({}, e), {}, {\n name: \"\\\\\\\"\".concat(e.name, \"\\\\\\\"\"),\n insertValue: \"\\\\\\\"\".concat(e.name, \"\\\\\\\"\")\n }))\n },\n \"future-incompat-report.frequency\": {\n description: \"Controls how often we display a notification to the terminal when a future incompat report is available\",\n tomlSuggestions: [{\n name: '\\\\\"always\\\\\"',\n insertValue: '\\\\\"always\\\\\"',\n description: \"Always display a notification when a command (e.g. cargo build) produces a future incompat report\"\n }, {\n name: '\\\\\"never\\\\\"',\n insertValue: '\\\\\"never\\\\\"',\n description: \"Never display a notification\"\n }]\n },\n \"http.debug\": {\n description: \"If true, enables debugging of HTTP requests\",\n tomlSuggestions: V\n },\n \"http.proxy\": {\n description: \"Sets an HTTP and HTTPS proxy to use\"\n },\n \"http.timeout\": {\n description: \"Sets the timeout for each HTTP request, in seconds\"\n },\n \"http.cainfo\": {\n description: \"Sets the path to a CA certificate bundle\"\n },\n \"http.check-revoke\": {\n description: \"This determines whether or not TLS certificate revocation checks should be performed. 
This only works on Windows\",\n tomlSuggestions: V\n },\n \"http.ssl-version\": {\n description: \"This sets the minimum TLS version to use\"\n },\n \"http.low-speed-limit\": {\n description: \"This setting controls timeout behavior for slow connections\"\n },\n \"http.multiplexing\": {\n description: \"When `true`, Cargo will attempt to use the HTTP2 protocol with multiplexing\",\n tomlSuggestions: V\n },\n \"http.user-agent\": {\n description: \"Specifies a custom user-agent header to use\"\n },\n \"install.root\": {\n description: \"Sets the path to the root directory for installing executables for `cargo install`\"\n },\n \"net.retry\": {\n description: \"Number of times to retry possibly spurious network errors\"\n },\n \"net.git-fetch-with-cli\": {\n description: \"If this is `true`, then Cargo will use the git executable to fetch registry indexes and git dependencies. If `false`, then it uses a built-in git library\",\n tomlSuggestions: V\n },\n \"net.offline\": {\n description: \"If this is true, then Cargo will avoid accessing the network, and attempt to proceed with locally cached data\",\n tomlSuggestions: V\n }\n },\n d = (0, m.keyValue)({\n keys: Object.entries(ce).map(_ref7 => {\n var [e, t] = _ref7;\n return _objectSpread({\n name: e\n }, t);\n }),\n values: function () {\n var _values = _asyncToGenerator(function* (e, t) {\n var _e$split;\n var a = (_e$split = e[e.length - 1].split(\"=\")) === null || _e$split === void 0 ? void 0 : _e$split[0],\n i = ce[a];\n if (i !== null && i !== void 0 && i.tomlSuggestions) return i.tomlSuggestions;\n });\n function values(_x8, _x9) {\n return _values.apply(this, arguments);\n }\n return values;\n }(),\n separator: \"=\"\n }),\n je = function je() {\n var e = arguments.length > 0 && arguments[0] !== undefined ? 
arguments[0] : !0;\n return {\n name: \"cargo\",\n icon: \"\\u{1F4E6}\",\n description: \"CLI Interface for Cargo\",\n subcommands: [{\n name: \"bench\",\n icon: \"\\u{1F4CA}\",\n description: \"Execute all benchmarks of a local package\",\n options: [{\n name: \"--bin\",\n description: \"Benchmark only the specified binary\",\n isRepeatable: !0,\n args: {\n name: \"bin\",\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"bin\"\n }),\n isVariadic: !0\n }\n }, {\n name: \"--example\",\n description: \"Benchmark only the specified example\",\n isRepeatable: !0,\n args: {\n name: \"example\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"example\"\n })\n }\n }, {\n name: \"--test\",\n description: \"Benchmark only the specified test target\",\n isRepeatable: !0,\n args: {\n name: \"test\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"test\"\n })\n }\n }, {\n name: \"--bench\",\n description: \"Benchmark only the specified bench target\",\n isRepeatable: !0,\n args: {\n name: \"bench\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"bench\"\n })\n }\n }, {\n name: [\"-p\", \"--package\"],\n description: \"Package to run benchmarks for\",\n isRepeatable: !0,\n args: {\n name: \"package\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: \"--exclude\",\n description: \"Exclude packages from the benchmark\",\n isRepeatable: !0,\n args: {\n name: \"exclude\",\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: [\"-j\", \"--jobs\"],\n description: \"Number of parallel jobs, defaults to # of CPUs\",\n args: {\n name: \"jobs\"\n }\n }, {\n name: \"--profile\",\n description: \"Build artifacts with the specified profile\",\n args: {\n name: \"profile\"\n }\n }, {\n name: \"--features\",\n description: \"Space or comma separated list of features to activate\",\n isRepeatable: !0,\n args: {\n name: \"features\",\n generators: w,\n isVariadic: !0\n }\n }, {\n name: \"--target\",\n description: \"Build for the target triple\",\n isRepeatable: !0,\n args: {\n name: \"target\",\n filterStrategy: \"fuzzy\",\n generators: b\n }\n }, {\n name: \"--target-dir\",\n description: \"Directory for all generated artifacts\",\n args: {\n name: \"target-dir\"\n }\n }, {\n name: \"--manifest-path\",\n description: \"Path to Cargo.toml\",\n args: {\n name: \"manifest-path\",\n generators: (0, m.filepaths)({\n equals: \"Cargo.toml\"\n })\n }\n }, {\n name: \"--message-format\",\n description: \"Error format\",\n isRepeatable: !0,\n args: {\n name: \"message-format\"\n }\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"auto\", \"always\", \"never\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print cargo log messages\"\n }, {\n name: \"--lib\",\n description: \"Benchmark only this package's library\"\n }, {\n name: \"--bins\",\n description: \"Benchmark all binaries\"\n }, {\n name: \"--examples\",\n description: \"Benchmark all examples\"\n }, {\n name: \"--tests\",\n description: \"Benchmark all tests\"\n }, {\n name: \"--benches\",\n description: \"Benchmark all 
benches\"\n }, {\n name: \"--all-targets\",\n description: \"Benchmark all targets\"\n }, {\n name: \"--no-run\",\n description: \"Compile, but don't run benchmarks\"\n }, {\n name: \"--workspace\",\n description: \"Benchmark all packages in the workspace\"\n }, {\n name: \"--all\",\n description: \"Alias for --workspace (deprecated)\",\n hidden: !0\n }, {\n name: \"--all-features\",\n description: \"Activate all available features\"\n }, {\n name: \"--no-default-features\",\n description: \"Do not activate the `default` feature\"\n }, {\n name: \"--ignore-rust-version\",\n description: \"Ignore `rust-version` specification in packages\"\n }, {\n name: \"--no-fail-fast\",\n description: \"Run all benchmarks regardless of failure\"\n }, {\n name: \"--unit-graph\",\n description: \"Output build graph in JSON (unstable)\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }, {\n name: \"--timings\",\n description: \"Timing output formats (unstable)\"\n }],\n args: [{\n name: \"BENCHNAME\"\n }, {\n name: \"args\",\n isVariadic: !0\n }]\n }, {\n name: [\"build\", \"b\"],\n icon: \"\\u{1F4E6}\",\n description: \"Compile a local package and all of its dependencies\",\n options: [{\n name: [\"-p\", \"--package\"],\n description: \"Package to build (see `cargo help pkgid`)\",\n isRepeatable: !0,\n args: {\n name: \"package\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: \"--exclude\",\n description: \"Exclude packages from the build\",\n isRepeatable: !0,\n args: {\n name: \"exclude\",\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: [\"-j\", \"--jobs\"],\n description: \"Number of parallel jobs, defaults to # of CPUs\",\n args: {\n name: \"jobs\"\n }\n }, {\n name: \"--bin\",\n description: \"Build only the specified binary\",\n isRepeatable: !0,\n args: {\n name: \"bin\",\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"bin\"\n }),\n isVariadic: !0\n }\n }, {\n name: \"--example\",\n description: \"Build only the specified example\",\n isRepeatable: !0,\n args: {\n name: \"example\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"example\"\n })\n }\n }, {\n name: \"--test\",\n description: \"Build only the specified test target\",\n isRepeatable: !0,\n args: {\n name: \"test\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"test\"\n })\n }\n }, {\n name: \"--bench\",\n description: \"Build only the specified bench target\",\n isRepeatable: !0,\n args: {\n name: \"bench\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"bench\"\n })\n }\n }, {\n name: \"--profile\",\n description: \"Build artifacts with the specified profile\",\n args: {\n name: \"profile\"\n }\n }, {\n name: \"--features\",\n description: \"Space or comma separated list of features to activate\",\n isRepeatable: !0,\n args: {\n name: \"features\",\n generators: w,\n isVariadic: !0\n }\n }, {\n name: \"--target\",\n description: \"Build for the target triple\",\n isRepeatable: !0,\n args: {\n name: \"target\",\n filterStrategy: \"fuzzy\",\n generators: b\n }\n }, {\n name: 
\"--target-dir\",\n description: \"Directory for all generated artifacts\",\n args: {\n name: \"target-dir\"\n }\n }, {\n name: \"--out-dir\",\n description: \"Copy final artifacts to this directory (unstable)\",\n args: {\n name: \"out-dir\"\n }\n }, {\n name: \"--manifest-path\",\n description: \"Path to Cargo.toml\",\n args: {\n name: \"manifest-path\",\n generators: (0, m.filepaths)({\n equals: \"Cargo.toml\"\n })\n }\n }, {\n name: \"--message-format\",\n description: \"Error format\",\n isRepeatable: !0,\n args: {\n name: \"message-format\"\n }\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"auto\", \"always\", \"never\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print cargo log messages\"\n }, {\n name: \"--workspace\",\n description: \"Build all packages in the workspace\"\n }, {\n name: \"--all\",\n description: \"Alias for --workspace (deprecated)\",\n hidden: !0\n }, {\n name: \"--lib\",\n description: \"Build only this package's library\"\n }, {\n name: \"--bins\",\n description: \"Build all binaries\"\n }, {\n name: \"--examples\",\n description: \"Build all examples\"\n }, {\n name: \"--tests\",\n description: \"Build all tests\"\n }, {\n name: \"--benches\",\n description: \"Build all benches\"\n }, {\n name: \"--all-targets\",\n description: \"Build all targets\"\n }, {\n name: [\"-r\", \"--release\"],\n description: \"Build artifacts in release mode, with optimizations\"\n }, {\n name: \"--all-features\",\n description: \"Activate all available features\"\n }, {\n name: \"--no-default-features\",\n description: \"Do not activate the `default` feature\"\n }, {\n name: \"--ignore-rust-version\",\n description: \"Ignore `rust-version` specification in packages\"\n }, {\n name: \"--build-plan\",\n description: \"Output the build plan in JSON (unstable)\"\n }, {\n name: \"--unit-graph\",\n description: \"Output build graph in JSON (unstable)\"\n }, {\n name: \"--future-incompat-report\",\n description: \"Outputs a future incompatibility report at the end of the build\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }, {\n name: \"--timings\",\n description: \"Timing output formats (unstable)\"\n }]\n }, {\n name: [\"check\", \"c\"],\n icon: \"\\u{1F6E0}\",\n description: \"Check a local package and all of its dependencies for errors\",\n options: [{\n name: [\"-p\", \"--package\"],\n description: \"Package(s) to check\",\n isRepeatable: !0,\n args: {\n name: \"package\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: \"--exclude\",\n description: \"Exclude packages from the check\",\n isRepeatable: !0,\n args: {\n name: \"exclude\",\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: [\"-j\", 
\"--jobs\"],\n description: \"Number of parallel jobs, defaults to # of CPUs\",\n args: {\n name: \"jobs\"\n }\n }, {\n name: \"--bin\",\n description: \"Check only the specified binary\",\n isRepeatable: !0,\n args: {\n name: \"bin\",\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"bin\"\n }),\n isVariadic: !0\n }\n }, {\n name: \"--example\",\n description: \"Check only the specified example\",\n isRepeatable: !0,\n args: {\n name: \"example\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"example\"\n })\n }\n }, {\n name: \"--test\",\n description: \"Check only the specified test target\",\n isRepeatable: !0,\n args: {\n name: \"test\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"test\"\n })\n }\n }, {\n name: \"--bench\",\n description: \"Check only the specified bench target\",\n isRepeatable: !0,\n args: {\n name: \"bench\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"bench\"\n })\n }\n }, {\n name: \"--profile\",\n description: \"Check artifacts with the specified profile\",\n args: {\n name: \"profile\"\n }\n }, {\n name: \"--features\",\n description: \"Space or comma separated list of features to activate\",\n isRepeatable: !0,\n args: {\n name: \"features\",\n generators: w,\n isVariadic: !0\n }\n }, {\n name: \"--target\",\n description: \"Check for the target triple\",\n isRepeatable: !0,\n args: {\n name: \"target\",\n filterStrategy: \"fuzzy\",\n generators: b\n }\n }, {\n name: \"--target-dir\",\n description: \"Directory for all generated artifacts\",\n args: {\n name: \"target-dir\"\n }\n }, {\n name: \"--manifest-path\",\n description: \"Path to Cargo.toml\",\n args: {\n name: \"manifest-path\",\n generators: (0, m.filepaths)({\n equals: \"Cargo.toml\"\n })\n }\n }, {\n name: \"--message-format\",\n description: \"Error format\",\n isRepeatable: !0,\n args: {\n name: \"message-format\"\n }\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"auto\", \"always\", \"never\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print cargo log messages\"\n }, {\n name: \"--workspace\",\n description: \"Check all packages in the workspace\"\n }, {\n name: \"--all\",\n description: \"Alias for --workspace (deprecated)\",\n hidden: !0\n }, {\n name: \"--lib\",\n description: \"Check only this package's library\"\n }, {\n name: \"--bins\",\n description: \"Check all binaries\"\n }, {\n name: \"--examples\",\n description: \"Check all examples\"\n }, {\n name: \"--tests\",\n description: \"Check all tests\"\n }, {\n name: \"--benches\",\n description: \"Check all benches\"\n }, {\n name: \"--all-targets\",\n description: \"Check all targets\"\n }, {\n name: [\"-r\", \"--release\"],\n description: \"Check artifacts in release mode, with optimizations\"\n }, {\n name: \"--all-features\",\n description: \"Activate all available features\"\n }, {\n name: \"--no-default-features\",\n description: \"Do not activate the `default` feature\"\n }, {\n name: \"--ignore-rust-version\",\n description: \"Ignore `rust-version` specification in packages\"\n }, {\n name: \"--unit-graph\",\n 
description: \"Output build graph in JSON (unstable)\"\n }, {\n name: \"--future-incompat-report\",\n description: \"Outputs a future incompatibility report at the end of the build\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }, {\n name: \"--timings\",\n description: \"Timing output formats (unstable)\"\n }]\n }, {\n name: \"clean\",\n icon: \"\\u{1F6E0}\",\n description: \"Remove artifacts that cargo has generated in the past\",\n options: [{\n name: [\"-p\", \"--package\"],\n description: \"Package to clean artifacts for\",\n isRepeatable: !0,\n args: {\n name: \"package\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: \"--manifest-path\",\n description: \"Path to Cargo.toml\",\n args: {\n name: \"manifest-path\",\n generators: (0, m.filepaths)({\n equals: \"Cargo.toml\"\n })\n }\n }, {\n name: \"--target\",\n description: \"Target triple to clean output for\",\n isRepeatable: !0,\n args: {\n name: \"target\",\n filterStrategy: \"fuzzy\",\n generators: b\n }\n }, {\n name: \"--target-dir\",\n description: \"Directory for all generated artifacts\",\n args: {\n name: \"target-dir\"\n }\n }, {\n name: \"--profile\",\n description: \"Clean artifacts of the specified profile\",\n args: {\n name: \"profile\"\n }\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"auto\", \"always\", \"never\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print cargo log messages\"\n }, {\n name: [\"-r\", \"--release\"],\n description: \"Whether or not to clean release artifacts\"\n }, {\n name: \"--doc\",\n description: \"Whether or not to clean just the documentation directory\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }]\n }, {\n name: \"config\",\n icon: \"\\u2699\\uFE0F\",\n description: \"Inspect configuration values\",\n subcommands: [{\n name: \"get\",\n options: [{\n name: \"--format\",\n description: \"Display format\",\n args: {\n name: \"format\",\n suggestions: [\"toml\", \"json\", \"json-value\"]\n }\n }, {\n name: \"--merged\",\n description: \"Whether or not to merge config values\",\n args: {\n name: \"merged\",\n suggestions: [\"yes\", \"no\"]\n }\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"auto\", \"always\", \"never\"]\n 
}\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: \"--version\",\n description: \"Print version information\"\n }, {\n name: \"--show-origin\",\n description: \"Display where the config value is defined\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }]\n }, {\n name: \"help\",\n description: \"Print this message or the help of the given subcommand(s)\",\n options: [{\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"auto\", \"always\", \"never\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: \"--version\",\n description: \"Print version information\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }],\n args: {\n name: \"subcommand\"\n }\n }],\n options: [{\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"auto\", \"always\", \"never\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }]\n }, {\n name: [\"doc\", \"d\"],\n icon: \"\\u{1F4C4}\",\n description: \"Build a package's documentation\",\n options: [{\n name: [\"-p\", \"--package\"],\n description: \"Package to document\",\n isRepeatable: !0,\n args: {\n name: \"package\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: \"--exclude\",\n description: \"Exclude packages from the build\",\n isRepeatable: !0,\n 
args: {\n name: \"exclude\",\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: [\"-j\", \"--jobs\"],\n description: \"Number of parallel jobs, defaults to # of CPUs\",\n args: {\n name: \"jobs\"\n }\n }, {\n name: \"--bin\",\n description: \"Document only the specified binary\",\n isRepeatable: !0,\n args: {\n name: \"bin\",\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"bin\"\n }),\n isVariadic: !0\n }\n }, {\n name: \"--example\",\n description: \"Document only the specified example\",\n isRepeatable: !0,\n args: {\n name: \"example\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"example\"\n })\n }\n }, {\n name: \"--profile\",\n description: \"Build artifacts with the specified profile\",\n args: {\n name: \"profile\"\n }\n }, {\n name: \"--features\",\n description: \"Space or comma separated list of features to activate\",\n isRepeatable: !0,\n args: {\n name: \"features\",\n generators: w,\n isVariadic: !0\n }\n }, {\n name: \"--target\",\n description: \"Build for the target triple\",\n isRepeatable: !0,\n args: {\n name: \"target\",\n filterStrategy: \"fuzzy\",\n generators: b\n }\n }, {\n name: \"--target-dir\",\n description: \"Directory for all generated artifacts\",\n args: {\n name: \"target-dir\"\n }\n }, {\n name: \"--manifest-path\",\n description: \"Path to Cargo.toml\",\n args: {\n name: \"manifest-path\",\n generators: (0, m.filepaths)({\n equals: \"Cargo.toml\"\n })\n }\n }, {\n name: \"--message-format\",\n description: \"Error format\",\n isRepeatable: !0,\n args: {\n name: \"message-format\"\n }\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\"\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print cargo log messages\"\n }, {\n name: \"--open\",\n description: \"Opens the docs in a browser after the operation\"\n }, {\n name: \"--workspace\",\n description: \"Document all packages in the workspace\"\n }, {\n name: \"--all\",\n description: \"Alias for --workspace (deprecated)\",\n hidden: !0\n }, {\n name: \"--no-deps\",\n description: \"Don't build documentation for dependencies\"\n }, {\n name: \"--document-private-items\",\n description: \"Document private items\"\n }, {\n name: \"--lib\",\n description: \"Document only this package's library\"\n }, {\n name: \"--bins\",\n description: \"Document all binaries\"\n }, {\n name: \"--examples\",\n description: \"Document all examples\"\n }, {\n name: [\"-r\", \"--release\"],\n description: \"Build artifacts in release mode, with optimizations\"\n }, {\n name: \"--all-features\",\n description: \"Activate all available features\"\n }, {\n name: \"--no-default-features\",\n description: \"Do not activate the `default` feature\"\n }, {\n name: \"--ignore-rust-version\",\n description: \"Ignore `rust-version` specification in packages\"\n }, {\n name: \"--unit-graph\",\n description: \"Output build graph in JSON (unstable)\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n 
description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }, {\n name: \"--timings\",\n description: \"Timing output formats (unstable)\"\n }]\n }, {\n name: \"fetch\",\n icon: \"\\u{1F4E6}\",\n description: \"Fetch dependencies of a package from the network\",\n options: [{\n name: \"--manifest-path\",\n description: \"Path to Cargo.toml\",\n args: {\n name: \"manifest-path\",\n generators: (0, m.filepaths)({\n equals: \"Cargo.toml\"\n })\n }\n }, {\n name: \"--target\",\n description: \"Fetch dependencies for the target triple\",\n isRepeatable: !0,\n args: {\n name: \"target\",\n filterStrategy: \"fuzzy\",\n generators: b\n }\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"always\", \"never\", \"auto\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print cargo log messages\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }]\n }, {\n name: \"fix\",\n icon: \"\\u{1F527}\",\n description: \"Automatically fix lint warnings reported by rustc\",\n options: [{\n name: [\"-p\", \"--package\"],\n description: \"Package(s) to fix\",\n isRepeatable: !0,\n args: {\n name: \"package\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: \"--exclude\",\n description: \"Exclude packages from the fixes\",\n isRepeatable: !0,\n args: {\n name: \"exclude\",\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: [\"-j\", \"--jobs\"],\n description: \"Number of parallel jobs, defaults to # of CPUs\",\n args: {\n name: \"jobs\"\n }\n }, {\n name: \"--bin\",\n description: \"Fix only the specified binary\",\n isRepeatable: !0,\n args: {\n name: \"bin\",\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"bin\"\n }),\n isVariadic: !0\n }\n }, {\n name: \"--example\",\n description: \"Fix only the specified example\",\n isRepeatable: !0,\n args: {\n name: \"example\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"example\"\n })\n }\n }, {\n name: \"--test\",\n description: \"Fix only the specified test target\",\n isRepeatable: !0,\n args: {\n name: \"test\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"test\"\n })\n }\n }, {\n name: \"--bench\",\n description: \"Fix only the specified bench target\",\n isRepeatable: !0,\n args: {\n name: \"bench\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"bench\"\n })\n }\n }, {\n name: \"--profile\",\n description: \"Build artifacts with the specified profile\",\n args: {\n name: \"profile\"\n }\n }, {\n name: \"--features\",\n description: 
\"Space or comma separated list of features to activate\",\n isRepeatable: !0,\n args: {\n name: \"features\",\n generators: w,\n isVariadic: !0\n }\n }, {\n name: \"--target\",\n description: \"Fix for the target triple\",\n isRepeatable: !0,\n args: {\n name: \"target\",\n filterStrategy: \"fuzzy\",\n generators: b\n }\n }, {\n name: \"--target-dir\",\n description: \"Directory for all generated artifacts\",\n args: {\n name: \"target-dir\"\n }\n }, {\n name: \"--manifest-path\",\n description: \"Path to Cargo.toml\",\n args: {\n name: \"manifest-path\",\n generators: (0, m.filepaths)({\n equals: \"Cargo.toml\"\n })\n }\n }, {\n name: \"--message-format\",\n description: \"Error format\",\n isRepeatable: !0,\n args: {\n name: \"message-format\"\n }\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"always\", \"never\", \"auto\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print cargo log messages\"\n }, {\n name: \"--workspace\",\n description: \"Fix all packages in the workspace\"\n }, {\n name: \"--all\",\n description: \"Alias for --workspace (deprecated)\",\n hidden: !0\n }, {\n name: \"--lib\",\n description: \"Fix only this package's library\"\n }, {\n name: \"--bins\",\n description: \"Fix all binaries\"\n }, {\n name: \"--examples\",\n description: \"Fix all examples\"\n }, {\n name: \"--tests\",\n description: \"Fix all tests\"\n }, {\n name: \"--benches\",\n description: \"Fix all benches\"\n }, {\n name: \"--all-targets\",\n description: \"Fix all targets (default)\"\n }, {\n name: [\"-r\", \"--release\"],\n description: \"Fix artifacts in release mode, with optimizations\"\n }, {\n name: \"--all-features\",\n description: \"Activate all available features\"\n }, {\n name: \"--no-default-features\",\n description: \"Do not activate the `default` feature\"\n }, {\n name: \"--broken-code\",\n description: \"Fix code even if it already has compiler errors\"\n }, {\n name: \"--edition\",\n description: \"Fix in preparation for the next edition\"\n }, {\n name: \"--edition-idioms\",\n description: \"Fix warnings to migrate to the idioms of an edition\"\n }, {\n name: \"--allow-no-vcs\",\n description: \"Fix code even if a VCS was not detected\"\n }, {\n name: \"--allow-dirty\",\n description: \"Fix code even if the working directory is dirty\"\n }, {\n name: \"--allow-staged\",\n description: \"Fix code even if the working directory has staged changes\"\n }, {\n name: \"--ignore-rust-version\",\n description: \"Ignore `rust-version` specification in packages\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }, {\n name: \"--timings\",\n description: \"Timing output formats (unstable)\"\n }]\n }, {\n name: \"generate-lockfile\",\n icon: 
\"\\u{1F4E6}\",\n description: \"Generate the lockfile for a package\",\n options: [{\n name: \"--manifest-path\",\n description: \"Path to Cargo.toml\",\n args: {\n name: \"manifest-path\",\n generators: (0, m.filepaths)({\n equals: \"Cargo.toml\"\n })\n }\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"always\", \"never\", \"auto\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print cargo log messages\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }]\n }, {\n name: \"git-checkout\",\n icon: \"\\u{1F4E6}\",\n description: \"This subcommand has been removed\",\n hidden: !0,\n options: [{\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"always\", \"never\", \"auto\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }]\n }, {\n name: \"init\",\n icon: \"\\u{1F4E6}\",\n description: \"Create a new cargo package in an existing directory\",\n options: [{\n name: \"--registry\",\n description: \"Registry to use\",\n args: {\n name: \"registry\"\n }\n }, {\n name: \"--vcs\",\n description: \"Initialize a new repository for the given version control system (git, hg, pijul, or fossil) or do not initialize any version control at all (none), overriding a global configuration\",\n args: {\n name: \"vcs\",\n suggestions: W\n }\n }, {\n name: \"--edition\",\n description: \"Edition to set for the crate generated\",\n args: {\n name: \"edition\",\n suggestions: G\n }\n }, {\n name: \"--name\",\n description: \"Set the resulting package name, defaults to the directory name\",\n args: {\n name: \"name\"\n }\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"always\", \"never\", \"auto\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable 
(nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print cargo log messages\"\n }, {\n name: \"--bin\",\n description: \"Use a binary (application) template [default]\"\n }, {\n name: \"--lib\",\n description: \"Use a library template\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }],\n args: {\n name: \"path\"\n }\n }, {\n name: \"install\",\n icon: \"\\u{1F4E6}\",\n description: \"Install a Rust binary. Default location is $HOME/.cargo/bin\",\n options: [{\n name: \"--version\",\n description: \"Specify a version to install\",\n args: {\n name: \"version\"\n }\n }, {\n name: \"--git\",\n description: \"Git URL to install the specified crate from\",\n exclusiveOn: [\"--path\", \"--index\", \"--registry\"],\n args: {\n name: \"git\"\n }\n }, {\n name: \"--branch\",\n description: \"Branch to use when installing from git\",\n args: {\n name: \"branch\"\n }\n }, {\n name: \"--tag\",\n description: \"Tag to use when installing from git\",\n args: {\n name: \"tag\"\n }\n }, {\n name: \"--rev\",\n description: \"Specific commit to use when installing from git\",\n args: {\n name: \"rev\"\n }\n }, {\n name: \"--path\",\n description: \"Filesystem path to local crate to install\",\n exclusiveOn: [\"--git\", \"--index\", \"--registry\"],\n args: {\n name: \"path\",\n template: \"folders\"\n }\n }, {\n name: [\"-j\", \"--jobs\"],\n description: \"Number of parallel jobs, defaults to # of CPUs\",\n args: {\n name: \"jobs\"\n }\n }, {\n name: \"--features\",\n description: \"Space or comma separated list of features to activate\",\n isRepeatable: !0,\n args: {\n name: \"features\",\n generators: w,\n isVariadic: !0\n }\n }, {\n name: \"--profile\",\n description: \"Install artifacts with the specified profile\",\n args: {\n name: \"profile\"\n }\n }, {\n name: \"--bin\",\n description: \"Install only the specified binary\",\n isRepeatable: !0,\n args: {\n name: \"bin\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"bin\"\n })\n }\n }, {\n name: \"--example\",\n description: \"Install only the specified example\",\n isRepeatable: !0,\n args: {\n name: \"example\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"example\"\n })\n }\n }, {\n name: \"--target\",\n description: \"Build for the target triple\",\n isRepeatable: !0,\n args: {\n name: \"target\",\n filterStrategy: \"fuzzy\",\n generators: b\n }\n }, {\n name: \"--target-dir\",\n description: \"Directory for all generated artifacts\",\n args: {\n name: \"target-dir\"\n }\n }, {\n name: \"--root\",\n description: \"Directory to install packages into\",\n args: {\n name: \"root\"\n }\n }, {\n name: \"--index\",\n description: \"Registry index to install from\",\n exclusiveOn: [\"--git\", \"--path\", \"--registry\"],\n args: {\n name: \"index\"\n }\n }, {\n name: \"--registry\",\n description: \"Registry to use\",\n exclusiveOn: [\"--git\", \"--path\", \"--index\"],\n args: {\n name: \"registry\"\n }\n }, {\n name: 
\"--message-format\",\n description: \"Error format\",\n isRepeatable: !0,\n args: {\n name: \"message-format\"\n }\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"always\", \"never\", \"auto\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print cargo log messages\"\n }, {\n name: \"--list\",\n description: \"List all installed packages and their versions\"\n }, {\n name: [\"-f\", \"--force\"],\n description: \"Force overwriting existing crates or binaries\"\n }, {\n name: \"--no-track\",\n description: \"Do not save tracking information\"\n }, {\n name: \"--all-features\",\n description: \"Activate all available features\"\n }, {\n name: \"--no-default-features\",\n description: \"Do not activate the `default` feature\"\n }, {\n name: \"--debug\",\n description: \"Build in debug mode instead of release mode\"\n }, {\n name: \"--bins\",\n description: \"Install all binaries\"\n }, {\n name: \"--examples\",\n description: \"Install all examples\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }, {\n name: \"--timings\",\n description: \"Timing output formats (unstable)\"\n }],\n args: {\n name: \"crate\",\n generators: H,\n filterStrategy: \"fuzzy\",\n debounce: !0,\n isVariadic: !0,\n suggestCurrentToken: !0\n }\n }, {\n name: \"locate-project\",\n icon: \"\\u{1F4E6}\",\n description: \"Print a JSON representation of a Cargo.toml file's location\",\n options: [{\n name: \"--manifest-path\",\n description: \"Path to Cargo.toml\",\n args: {\n name: \"manifest-path\",\n generators: (0, m.filepaths)({\n equals: \"Cargo.toml\"\n })\n }\n }, {\n name: \"--message-format\",\n description: \"Output representation [possible values: json, plain]\",\n args: {\n name: \"message-format\"\n }\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"always\", \"never\", \"auto\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print cargo log messages\"\n }, {\n name: \"--workspace\",\n description: \"Locate Cargo.toml of the workspace root\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: 
\"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }]\n }, {\n name: \"login\",\n icon: \"\\u{1F4E6}\",\n description: \"Save an api token from the registry locally. If token is not specified, it will be read from stdin\",\n options: [{\n name: \"--registry\",\n description: \"Registry to use\",\n args: {\n name: \"registry\"\n }\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"always\", \"never\", \"auto\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print cargo log messages\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }],\n args: {\n name: \"token\"\n }\n }, {\n name: \"logout\",\n icon: \"\\u{1F4E6}\",\n description: \"Remove an API token from the registry locally\",\n options: [{\n name: \"--registry\",\n description: \"Registry to use\",\n args: {\n name: \"registry\"\n }\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"always\", \"never\", \"auto\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print cargo log messages\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }]\n }, {\n name: \"metadata\",\n icon: \"\\u{1F4E6}\",\n description: \"Output the resolved dependencies of a package, the concrete used versions including overrides, in machine-readable format\",\n options: [{\n name: \"--features\",\n description: \"Space or comma separated list of features to activate\",\n isRepeatable: !0,\n args: {\n name: \"features\",\n generators: w,\n isVariadic: !0\n }\n }, {\n name: \"--filter-platform\",\n description: \"Only include resolve dependencies matching the given target-triple\",\n isRepeatable: !0,\n args: {\n name: \"filter-platform\"\n }\n }, {\n name: \"--manifest-path\",\n description: \"Path to Cargo.toml\",\n args: {\n name: \"manifest-path\",\n generators: (0, m.filepaths)({\n equals: \"Cargo.toml\"\n })\n 
}\n      }, {\n        name: \"--format-version\",\n        description: \"Format version\",\n        args: {\n          name: \"format-version\",\n          suggestions: [\"1\"]\n        }\n      }, {\n        name: \"--color\",\n        description: \"Coloring: auto, always, never\",\n        args: {\n          name: \"color\",\n          suggestions: [\"always\", \"never\", \"auto\"]\n        }\n      }, {\n        name: \"--config\",\n        description: \"Override a configuration value\",\n        isRepeatable: !0,\n        args: {\n          name: \"config\",\n          generators: d\n        }\n      }, {\n        name: \"-Z\",\n        description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n        isRepeatable: !0,\n        args: {\n          name: \"unstable-features\"\n        }\n      }, {\n        name: [\"-q\", \"--quiet\"],\n        description: \"Do not print cargo log messages\"\n      }, {\n        name: \"--all-features\",\n        description: \"Activate all available features\"\n      }, {\n        name: \"--no-default-features\",\n        description: \"Do not activate the `default` feature\"\n      }, {\n        name: \"--no-deps\",\n        description: \"Output information only about the workspace members and don't fetch dependencies\"\n      }, {\n        name: [\"-h\", \"--help\"],\n        description: \"Print help information\"\n      }, {\n        name: [\"-v\", \"--verbose\"],\n        description: \"Use verbose output (-vv very verbose/build.rs output)\",\n        isRepeatable: !0\n      }, {\n        name: \"--frozen\",\n        description: \"Require Cargo.lock and cache are up to date\"\n      }, {\n        name: \"--locked\",\n        description: \"Require Cargo.lock is up to date\"\n      }, {\n        name: \"--offline\",\n        description: \"Run without accessing the network\"\n      }]\n    }, {\n      name: \"new\",\n      icon: \"\\u{1F4E6}\",\n      description: \"Create a new cargo package at <path>\",\n      options: [{\n        name: \"--registry\",\n        description: \"Registry to use\",\n        args: {\n          name: \"registry\"\n        }\n      }, {\n        name: \"--vcs\",\n        description: \"Initialize a new repository for the given version control system (git, hg, pijul, or fossil) or do not initialize any version control at all (none), overriding a global configuration\",\n        args: {\n          name: \"vcs\",\n          suggestions: W\n        }\n      }, {\n        name: \"--edition\",\n        description: \"Edition to set for the crate generated\",\n        args: {\n          name: \"edition\",\n          suggestions: G\n        }\n      }, {\n        name: \"--name\",\n        description: \"Set the resulting package name, defaults to the directory name\",\n        args: {\n          name: \"name\"\n        }\n      }, {\n        name: \"--color\",\n        description: \"Coloring: auto, always, never\",\n        args: {\n          name: \"color\",\n          suggestions: [\"always\", \"never\", \"auto\"]\n        }\n      }, {\n        name: \"--config\",\n        description: \"Override a configuration value\",\n        isRepeatable: !0,\n        args: {\n          name: \"config\",\n          generators: d\n        }\n      }, {\n        name: \"-Z\",\n        description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n        isRepeatable: !0,\n        args: {\n          name: \"unstable-features\"\n        }\n      }, {\n        name: [\"-q\", \"--quiet\"],\n        description: \"Do not print cargo log messages\"\n      }, {\n        name: \"--bin\",\n        description: \"Use a binary (application) template [default]\"\n      }, {\n        name: \"--lib\",\n        description: \"Use a library template\"\n      }, {\n        name: [\"-h\", \"--help\"],\n        description: \"Print help information\"\n      }, {\n        name: [\"-v\", \"--verbose\"],\n        description: \"Use verbose output (-vv very verbose/build.rs output)\",\n        isRepeatable: !0\n      }, {\n        name: \"--frozen\",\n        description: \"Require Cargo.lock and cache are up to date\"\n      }, {\n        name: \"--locked\",\n        description: \"Require Cargo.lock is up to date\"\n      }, {\n        name: \"--offline\",\n        description: \"Run without accessing the network\"\n      }],\n      args: {\n        name: \"path\",\n        template: \"folders\"\n      }\n    }, {\n      name: 
\"owner\",\n icon: \"\\u{1F4E6}\",\n description: \"Manage the owners of a crate on the registry\",\n options: [{\n name: [\"-a\", \"--add\"],\n description: \"Name of a user or team to invite as an owner\",\n isRepeatable: !0,\n args: {\n name: \"add\"\n }\n }, {\n name: [\"-r\", \"--remove\"],\n description: \"Name of a user or team to remove as an owner\",\n isRepeatable: !0,\n args: {\n name: \"remove\"\n }\n }, {\n name: \"--index\",\n description: \"Registry index to modify owners for\",\n args: {\n name: \"index\"\n }\n }, {\n name: \"--token\",\n description: \"API token to use when authenticating\",\n args: {\n name: \"token\"\n }\n }, {\n name: \"--registry\",\n description: \"Registry to use\",\n args: {\n name: \"registry\"\n }\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"always\", \"never\", \"auto\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print cargo log messages\"\n }, {\n name: [\"-l\", \"--list\"],\n description: \"List owners of a crate\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }],\n args: {\n name: \"crate\"\n }\n }, {\n name: \"package\",\n icon: \"\\u{1F4E6}\",\n description: \"Assemble the local package into a distributable tarball\",\n options: [{\n name: \"--target\",\n description: \"Build for the target triple\",\n isRepeatable: !0,\n args: {\n name: \"target\",\n filterStrategy: \"fuzzy\",\n generators: b\n }\n }, {\n name: \"--target-dir\",\n description: \"Directory for all generated artifacts\",\n args: {\n name: \"target-dir\"\n }\n }, {\n name: \"--features\",\n description: \"Space or comma separated list of features to activate\",\n isRepeatable: !0,\n args: {\n name: \"features\",\n generators: w,\n isVariadic: !0\n }\n }, {\n name: [\"-p\", \"--package\"],\n description: \"Package(s) to assemble\",\n isRepeatable: !0,\n args: {\n name: \"package\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: \"--exclude\",\n description: \"Don't assemble specified packages\",\n isRepeatable: !0,\n args: {\n name: \"exclude\",\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: \"--manifest-path\",\n description: \"Path to Cargo.toml\",\n args: {\n name: \"manifest-path\",\n generators: (0, m.filepaths)({\n equals: \"Cargo.toml\"\n })\n }\n }, {\n name: [\"-j\", \"--jobs\"],\n description: \"Number of parallel jobs, defaults to # of CPUs\",\n args: {\n name: \"jobs\"\n }\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"always\", \"never\", \"auto\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n 
}\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print cargo log messages\"\n }, {\n name: [\"-l\", \"--list\"],\n description: \"Print files included in a package without making one\"\n }, {\n name: \"--no-verify\",\n description: \"Don't verify the contents by building them\"\n }, {\n name: \"--no-metadata\",\n description: \"Ignore warnings about a lack of human-usable metadata\"\n }, {\n name: \"--allow-dirty\",\n description: \"Allow dirty working directories to be packaged\"\n }, {\n name: \"--all-features\",\n description: \"Activate all available features\"\n }, {\n name: \"--no-default-features\",\n description: \"Do not activate the `default` feature\"\n }, {\n name: \"--workspace\",\n description: \"Assemble all packages in the workspace\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }]\n }, {\n name: \"pkgid\",\n icon: \"\\u{1F4E6}\",\n description: \"Print a fully qualified package specification\",\n options: [{\n name: [\"-p\", \"--package\"],\n description: \"Argument to get the package ID specifier for\",\n args: {\n name: \"package\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: \"--manifest-path\",\n description: \"Path to Cargo.toml\",\n args: {\n name: \"manifest-path\",\n generators: (0, m.filepaths)({\n equals: \"Cargo.toml\"\n })\n }\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"always\", \"never\", \"auto\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print cargo log messages\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }],\n args: {\n name: \"SPEC\",\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: \"publish\",\n icon: \"\\u{1F4E6}\",\n description: \"Upload a package to the registry\",\n options: [{\n name: \"--index\",\n description: \"Registry index URL to upload the package to\",\n args: {\n name: \"index\"\n }\n }, {\n name: \"--token\",\n description: \"Token to use when uploading\",\n args: {\n name: \"token\"\n }\n }, {\n name: \"--target\",\n description: \"Build for the target triple\",\n isRepeatable: !0,\n args: {\n name: \"target\",\n filterStrategy: 
\"fuzzy\",\n generators: b\n }\n }, {\n name: \"--target-dir\",\n description: \"Directory for all generated artifacts\",\n args: {\n name: \"target-dir\"\n }\n }, {\n name: [\"-p\", \"--package\"],\n description: \"Package to publish\",\n args: {\n name: \"package\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: \"--manifest-path\",\n description: \"Path to Cargo.toml\",\n args: {\n name: \"manifest-path\",\n generators: (0, m.filepaths)({\n equals: \"Cargo.toml\"\n })\n }\n }, {\n name: \"--features\",\n description: \"Space or comma separated list of features to activate\",\n isRepeatable: !0,\n args: {\n name: \"features\",\n generators: w,\n isVariadic: !0\n }\n }, {\n name: [\"-j\", \"--jobs\"],\n description: \"Number of parallel jobs, defaults to # of CPUs\",\n args: {\n name: \"jobs\"\n }\n }, {\n name: \"--registry\",\n description: \"Registry to publish to\",\n args: {\n name: \"registry\"\n }\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"always\", \"never\", \"auto\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print cargo log messages\"\n }, {\n name: \"--no-verify\",\n description: \"Don't verify the contents by building them\"\n }, {\n name: \"--allow-dirty\",\n description: \"Allow dirty working directories to be packaged\"\n }, {\n name: \"--all-features\",\n description: \"Activate all available features\"\n }, {\n name: \"--no-default-features\",\n description: \"Do not activate the `default` feature\"\n }, {\n name: \"--dry-run\",\n description: \"Perform all checks without uploading\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }]\n }, {\n name: \"read-manifest\",\n icon: \"\\u{1F4E6}\",\n description: \"Print a JSON representation of a Cargo.toml manifest. 
Deprecated, use `cargo metadata --no-deps` instead\",\n hidden: !0,\n options: [{\n name: \"--manifest-path\",\n description: \"Path to Cargo.toml\",\n args: {\n name: \"manifest-path\",\n generators: (0, m.filepaths)({\n equals: \"Cargo.toml\"\n })\n }\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"always\", \"never\", \"auto\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print cargo log messages\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }]\n }, {\n name: \"report\",\n icon: \"\\u{1F4E6}\",\n description: \"Generate and display various kinds of reports\",\n subcommands: [{\n name: \"future-incompatibilities\",\n description: \"Reports any crates which will eventually stop compiling\",\n options: [{\n name: \"--id\",\n description: \"Identifier of the report generated by a Cargo command invocation\",\n args: {\n name: \"id\"\n }\n }, {\n name: [\"-p\", \"--package\"],\n description: \"Package to display a report for\",\n args: {\n name: \"package\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"always\", \"never\", \"auto\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: \"--version\",\n description: \"Print version information\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }]\n }, {\n name: \"help\",\n description: \"Print this message or the help of the given subcommand(s)\",\n options: [{\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"always\", \"never\", \"auto\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: 
\"--version\",\n description: \"Print version information\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }],\n args: {\n name: \"subcommand\"\n }\n }],\n options: [{\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"always\", \"never\", \"auto\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }]\n }, {\n name: [\"run\", \"r\"],\n icon: \"\\u{1F4E6}\",\n description: \"Run a binary or example of the local package\",\n options: [{\n name: \"--bin\",\n description: \"Name of the bin target to run\",\n isRepeatable: !0,\n args: {\n name: \"bin\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"bin\"\n })\n }\n }, {\n name: \"--example\",\n description: \"Name of the example target to run\",\n isRepeatable: !0,\n args: {\n name: \"example\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"example\"\n })\n }\n }, {\n name: [\"-p\", \"--package\"],\n description: \"Package with the target to run\",\n args: {\n name: \"package\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: [\"-j\", \"--jobs\"],\n description: \"Number of parallel jobs, defaults to # of CPUs\",\n args: {\n name: \"jobs\"\n }\n }, {\n name: \"--profile\",\n description: \"Build artifacts with the specified profile\",\n args: {\n name: \"profile\"\n }\n }, {\n name: \"--features\",\n description: \"Space or comma separated list of features to activate\",\n isRepeatable: !0,\n args: {\n name: \"features\",\n generators: w,\n isVariadic: !0\n }\n }, {\n name: \"--target\",\n description: \"Build for the target triple\",\n isRepeatable: !0,\n args: {\n name: \"target\",\n filterStrategy: \"fuzzy\",\n generators: b\n }\n }, {\n name: \"--target-dir\",\n description: \"Directory for all generated artifacts\",\n args: {\n name: \"target-dir\"\n }\n }, {\n name: \"--manifest-path\",\n description: \"Path to Cargo.toml\",\n args: {\n name: \"manifest-path\",\n generators: (0, m.filepaths)({\n equals: \"Cargo.toml\"\n })\n }\n }, {\n name: \"--message-format\",\n description: \"Error format\",\n isRepeatable: !0,\n args: {\n name: \"message-format\"\n }\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"always\", \"never\", \"auto\"]\n }\n }, {\n name: 
\"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print cargo log messages\"\n }, {\n name: [\"-r\", \"--release\"],\n description: \"Build artifacts in release mode, with optimizations\"\n }, {\n name: \"--all-features\",\n description: \"Activate all available features\"\n }, {\n name: \"--no-default-features\",\n description: \"Do not activate the `default` feature\"\n }, {\n name: \"--unit-graph\",\n description: \"Output build graph in JSON (unstable)\"\n }, {\n name: \"--ignore-rust-version\",\n description: \"Ignore `rust-version` specification in packages\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }, {\n name: \"--timings\",\n description: \"Timing output formats (unstable)\"\n }],\n args: {\n name: \"args\",\n isVariadic: !0,\n isOptional: !0\n }\n }, {\n name: \"rustc\",\n icon: \"\\u{1F4E6}\",\n description: \"Compile a package, and pass extra options to the compiler\",\n options: [{\n name: [\"-p\", \"--package\"],\n description: \"Package to build\",\n args: {\n name: \"package\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: [\"-j\", \"--jobs\"],\n description: \"Number of parallel jobs, defaults to # of CPUs\",\n args: {\n name: \"jobs\"\n }\n }, {\n name: \"--bin\",\n description: \"Build only the specified binary\",\n isRepeatable: !0,\n args: {\n name: \"bin\",\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"bin\"\n }),\n isVariadic: !0\n }\n }, {\n name: \"--example\",\n description: \"Build only the specified example\",\n isRepeatable: !0,\n args: {\n name: \"example\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"example\"\n })\n }\n }, {\n name: \"--test\",\n description: \"Build only the specified test target\",\n isRepeatable: !0,\n args: {\n name: \"test\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"test\"\n })\n }\n }, {\n name: \"--bench\",\n description: \"Build only the specified bench target\",\n isRepeatable: !0,\n args: {\n name: \"bench\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"bench\"\n })\n }\n }, {\n name: \"--profile\",\n description: \"Build artifacts with the specified profile\",\n args: {\n name: \"profile\"\n }\n }, {\n name: \"--features\",\n description: \"Space or comma separated list of features to activate\",\n isRepeatable: !0,\n args: {\n name: \"features\",\n generators: w,\n isVariadic: !0\n }\n }, {\n name: \"--target\",\n description: \"Target triple which compiles will be for\",\n isRepeatable: !0,\n args: {\n name: \"target\",\n filterStrategy: \"fuzzy\",\n generators: b\n }\n }, {\n name: \"--print\",\n description: \"Output compiler information without compiling\",\n args: {\n name: \"print\"\n }\n }, {\n name: \"--crate-type\",\n description: \"Comma 
separated list of types of crates for the compiler to emit\",\n isRepeatable: !0,\n args: {\n name: \"crate-type\"\n }\n }, {\n name: \"--target-dir\",\n description: \"Directory for all generated artifacts\",\n args: {\n name: \"target-dir\"\n }\n }, {\n name: \"--manifest-path\",\n description: \"Path to Cargo.toml\",\n args: {\n name: \"manifest-path\",\n generators: (0, m.filepaths)({\n equals: \"Cargo.toml\"\n })\n }\n }, {\n name: \"--message-format\",\n description: \"Error format\",\n isRepeatable: !0,\n args: {\n name: \"message-format\"\n }\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"always\", \"never\", \"auto\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print cargo log messages\"\n }, {\n name: \"--lib\",\n description: \"Build only this package's library\"\n }, {\n name: \"--bins\",\n description: \"Build all binaries\"\n }, {\n name: \"--examples\",\n description: \"Build all examples\"\n }, {\n name: \"--tests\",\n description: \"Build all tests\"\n }, {\n name: \"--benches\",\n description: \"Build all benches\"\n }, {\n name: \"--all-targets\",\n description: \"Build all targets\"\n }, {\n name: [\"-r\", \"--release\"],\n description: \"Build artifacts in release mode, with optimizations\"\n }, {\n name: \"--all-features\",\n description: \"Activate all available features\"\n }, {\n name: \"--no-default-features\",\n description: \"Do not activate the `default` feature\"\n }, {\n name: \"--unit-graph\",\n description: \"Output build graph in JSON (unstable)\"\n }, {\n name: \"--ignore-rust-version\",\n description: \"Ignore `rust-version` specification in packages\"\n }, {\n name: \"--future-incompat-report\",\n description: \"Outputs a future incompatibility report at the end of the build\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }, {\n name: \"--timings\",\n description: \"Timing output formats (unstable)\"\n }],\n args: {\n name: \"args\",\n isVariadic: !0\n }\n }, {\n name: \"rustdoc\",\n icon: \"\\u{1F4E6}\",\n description: \"Build a package's documentation, using specified custom flags\",\n options: [{\n name: [\"-p\", \"--package\"],\n description: \"Package to document\",\n args: {\n name: \"package\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: [\"-j\", \"--jobs\"],\n description: \"Number of parallel jobs, defaults to # of CPUs\",\n args: {\n name: \"jobs\"\n }\n }, {\n name: \"--bin\",\n description: \"Build only the specified binary\",\n isRepeatable: !0,\n args: {\n name: \"bin\",\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"bin\"\n }),\n isVariadic: !0\n }\n }, {\n name: \"--example\",\n description: \"Build only the specified example\",\n isRepeatable: !0,\n 
args: {\n name: \"example\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"example\"\n })\n }\n }, {\n name: \"--test\",\n description: \"Build only the specified test target\",\n isRepeatable: !0,\n args: {\n name: \"test\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"test\"\n })\n }\n }, {\n name: \"--bench\",\n description: \"Build only the specified bench target\",\n isRepeatable: !0,\n args: {\n name: \"bench\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"bench\"\n })\n }\n }, {\n name: \"--profile\",\n description: \"Build artifacts with the specified profile\",\n args: {\n name: \"profile\"\n }\n }, {\n name: \"--features\",\n description: \"Space or comma separated list of features to activate\",\n isRepeatable: !0,\n args: {\n name: \"features\",\n generators: w,\n isVariadic: !0\n }\n }, {\n name: \"--target\",\n description: \"Build for the target triple\",\n isRepeatable: !0,\n args: {\n name: \"target\",\n filterStrategy: \"fuzzy\",\n generators: b\n }\n }, {\n name: \"--target-dir\",\n description: \"Directory for all generated artifacts\",\n args: {\n name: \"target-dir\"\n }\n }, {\n name: \"--manifest-path\",\n description: \"Path to Cargo.toml\",\n args: {\n name: \"manifest-path\",\n generators: (0, m.filepaths)({\n equals: \"Cargo.toml\"\n })\n }\n }, {\n name: \"--message-format\",\n description: \"Error format\",\n isRepeatable: !0,\n args: {\n name: \"message-format\"\n }\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"always\", \"never\", \"auto\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print cargo log messages\"\n }, {\n name: \"--open\",\n description: \"Opens the docs in a browser after the operation\"\n }, {\n name: \"--lib\",\n description: \"Build only this package's library\"\n }, {\n name: \"--bins\",\n description: \"Build all binaries\"\n }, {\n name: \"--examples\",\n description: \"Build all examples\"\n }, {\n name: \"--tests\",\n description: \"Build all tests\"\n }, {\n name: \"--benches\",\n description: \"Build all benches\"\n }, {\n name: \"--all-targets\",\n description: \"Build all targets\"\n }, {\n name: [\"-r\", \"--release\"],\n description: \"Build artifacts in release mode, with optimizations\"\n }, {\n name: \"--all-features\",\n description: \"Activate all available features\"\n }, {\n name: \"--no-default-features\",\n description: \"Do not activate the `default` feature\"\n }, {\n name: \"--unit-graph\",\n description: \"Output build graph in JSON (unstable)\"\n }, {\n name: \"--ignore-rust-version\",\n description: \"Ignore `rust-version` specification in packages\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the 
network\"\n }, {\n name: \"--timings\",\n description: \"Timing output formats (unstable)\"\n }],\n args: {\n name: \"args\",\n isVariadic: !0\n }\n }, {\n name: \"search\",\n icon: \"\\u{1F50E}\",\n description: \"Search packages in crates.io\",\n options: [{\n name: \"--index\",\n description: \"Registry index URL to upload the package to\",\n args: {\n name: \"index\"\n }\n }, {\n name: \"--limit\",\n description: \"Limit the number of results (default: 10, max: 100)\",\n args: {\n name: \"limit\"\n }\n }, {\n name: \"--registry\",\n description: \"Registry to use\",\n args: {\n name: \"registry\"\n }\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\"\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print cargo log messages\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }],\n args: {\n name: \"query\",\n generators: H,\n filterStrategy: \"fuzzy\",\n debounce: !0,\n isVariadic: !0,\n suggestCurrentToken: !0\n }\n }, {\n name: [\"test\", \"t\"],\n icon: \"\\u{1F4E6}\",\n description: \"Execute all unit and integration tests and build examples of a local package\",\n options: [{\n name: \"--bin\",\n description: \"Test only the specified binary\",\n isRepeatable: !0,\n args: {\n name: \"bin\",\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"bin\"\n }),\n isVariadic: !0\n }\n }, {\n name: \"--example\",\n description: \"Test only the specified example\",\n isRepeatable: !0,\n args: {\n name: \"example\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"example\"\n })\n }\n }, {\n name: \"--test\",\n description: \"Test only the specified test target\",\n isRepeatable: !0,\n args: {\n name: \"test\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"test\"\n })\n }\n }, {\n name: \"--bench\",\n description: \"Test only the specified bench target\",\n isRepeatable: !0,\n args: {\n name: \"bench\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"bench\"\n })\n }\n }, {\n name: [\"-p\", \"--package\"],\n description: \"Package to run tests for\",\n isRepeatable: !0,\n args: {\n name: \"package\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: \"--exclude\",\n description: \"Exclude packages from the test\",\n isRepeatable: !0,\n args: {\n name: \"exclude\",\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: [\"-j\", \"--jobs\"],\n description: \"Number of parallel jobs, defaults to # of CPUs\",\n args: {\n name: \"jobs\"\n }\n }, {\n name: \"--profile\",\n description: \"Build artifacts with the specified profile\",\n args: {\n name: \"profile\"\n }\n }, {\n name: \"--features\",\n description: \"Space or comma separated list of features to 
activate\",\n isRepeatable: !0,\n args: {\n name: \"features\",\n generators: w,\n isVariadic: !0\n }\n }, {\n name: \"--target\",\n description: \"Build for the target triple\",\n isRepeatable: !0,\n args: {\n name: \"target\",\n filterStrategy: \"fuzzy\",\n generators: b\n }\n }, {\n name: \"--target-dir\",\n description: \"Directory for all generated artifacts\",\n args: {\n name: \"target-dir\"\n }\n }, {\n name: \"--manifest-path\",\n description: \"Path to Cargo.toml\",\n args: {\n name: \"manifest-path\",\n generators: (0, m.filepaths)({\n equals: \"Cargo.toml\"\n })\n }\n }, {\n name: \"--message-format\",\n description: \"Error format\",\n isRepeatable: !0,\n args: {\n name: \"message-format\"\n }\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"auto\", \"always\", \"never\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Display one character per test instead of one line\"\n }, {\n name: \"--lib\",\n description: \"Test only this package's library unit tests\"\n }, {\n name: \"--bins\",\n description: \"Test all binaries\"\n }, {\n name: \"--examples\",\n description: \"Test all examples\"\n }, {\n name: \"--tests\",\n description: \"Test all tests\"\n }, {\n name: \"--benches\",\n description: \"Test all benches\"\n }, {\n name: \"--all-targets\",\n description: \"Test all targets\"\n }, {\n name: \"--doc\",\n description: \"Test only this library's documentation\"\n }, {\n name: \"--no-run\",\n description: \"Compile, but don't run tests\"\n }, {\n name: \"--no-fail-fast\",\n description: \"Run all tests regardless of failure\"\n }, {\n name: \"--workspace\",\n description: \"Test all packages in the workspace\"\n }, {\n name: \"--all\",\n description: \"Alias for --workspace (deprecated)\",\n hidden: !0\n }, {\n name: [\"-r\", \"--release\"],\n description: \"Build artifacts in release mode, with optimizations\"\n }, {\n name: \"--all-features\",\n description: \"Activate all available features\"\n }, {\n name: \"--no-default-features\",\n description: \"Do not activate the `default` feature\"\n }, {\n name: \"--ignore-rust-version\",\n description: \"Ignore `rust-version` specification in packages\"\n }, {\n name: \"--unit-graph\",\n description: \"Output build graph in JSON (unstable)\"\n }, {\n name: \"--future-incompat-report\",\n description: \"Outputs a future incompatibility report at the end of the build\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }, {\n name: \"--timings\",\n description: \"Timing output formats (unstable)\"\n }],\n args: [{\n name: \"TESTNAME\",\n isOptional: !0\n }, {\n name: \"args\",\n isOptional: !0,\n isVariadic: !0\n }]\n }, {\n name: \"tree\",\n icon: \"\\u{1F4E6}\",\n description: \"Display a tree visualization of 
a dependency graph\",\n options: [{\n name: \"--manifest-path\",\n description: \"Path to Cargo.toml\",\n args: {\n name: \"manifest-path\",\n generators: (0, m.filepaths)({\n equals: \"Cargo.toml\"\n })\n }\n }, {\n name: [\"-p\", \"--package\"],\n description: \"Package to be used as the root of the tree\",\n isRepeatable: !0,\n args: {\n name: \"package\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: \"--exclude\",\n description: \"Exclude specific workspace members\",\n isRepeatable: !0,\n args: {\n name: \"exclude\",\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: \"--features\",\n description: \"Space or comma separated list of features to activate\",\n isRepeatable: !0,\n args: {\n name: \"features\",\n generators: w,\n isVariadic: !0\n }\n }, {\n name: \"--target\",\n description: \"Filter dependencies matching the given target-triple (default host platform). Pass `all` to include all targets\",\n isRepeatable: !0,\n args: {\n name: \"target\",\n suggestions: [\"all\"],\n filterStrategy: \"fuzzy\",\n generators: b\n }\n }, {\n name: [\"-e\", \"--edges\"],\n description: \"The kinds of dependencies to display (features, normal, build, dev, all, no-normal, no-build, no-dev, no-proc-macro)\",\n isRepeatable: !0,\n args: {\n name: \"edges\",\n suggestions: [\"features\", \"normal\", \"build\", \"dev\", \"all\", \"no-normal\", \"no-build\", \"no-dev\", \"no-proc-macro\"]\n }\n }, {\n name: [\"-i\", \"--invert\"],\n description: \"Invert the tree direction and focus on the given package\",\n isRepeatable: !0,\n args: {\n name: \"invert\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: F\n }\n }, {\n name: \"--prune\",\n description: \"Prune the given package from the display of the dependency tree\",\n isRepeatable: !0,\n args: {\n name: \"prune\",\n filterStrategy: \"fuzzy\",\n generators: F\n }\n }, {\n name: \"--depth\",\n description: \"Maximum display depth of the dependency tree\",\n args: {\n name: \"depth\"\n }\n }, {\n name: \"--prefix\",\n description: \"Change the prefix (indentation) of how each entry is displayed\",\n args: {\n name: \"prefix\",\n suggestions: [\"depth\", \"indent\", \"none\"]\n }\n }, {\n name: \"--charset\",\n description: \"Character set to use in output: utf8, ascii\",\n args: {\n name: \"charset\",\n suggestions: [\"utf8\", \"ascii\"]\n }\n }, {\n name: [\"-f\", \"--format\"],\n description: \"Format string used for printing dependencies\",\n args: {\n name: \"format\"\n }\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"auto\", \"always\", \"never\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print cargo log messages\"\n }, {\n name: \"--workspace\",\n description: \"Display the tree for all packages in the workspace\"\n }, {\n name: [\"-a\", \"--all\"]\n }, {\n name: \"--all-targets\"\n }, {\n name: \"--all-features\",\n description: \"Activate all available features\"\n }, {\n name: \"--no-default-features\",\n description: \"Do not activate the `default` feature\"\n }, {\n name: \"--no-dev-dependencies\"\n }, {\n name: \"--no-indent\"\n }, {\n name: 
\"--prefix-depth\"\n }, {\n name: \"--no-dedupe\",\n description: \"Do not de-duplicate (repeats all shared dependencies)\"\n }, {\n name: [\"-d\", \"--duplicates\"],\n description: \"Show only dependencies which come in multiple versions (implies -i)\"\n }, {\n name: [\"-V\", \"--version\"]\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }]\n }, {\n name: \"uninstall\",\n icon: \"\\u{1F4E6}\",\n description: \"Remove a Rust binary\",\n options: [{\n name: [\"-p\", \"--package\"],\n description: \"Package to uninstall\",\n isRepeatable: !0,\n args: {\n name: \"package\",\n isVariadic: !0\n }\n }, {\n name: \"--bin\",\n description: \"Only uninstall the binary NAME\",\n isRepeatable: !0,\n args: {\n name: \"bin\"\n }\n }, {\n name: \"--root\",\n description: \"Directory to uninstall packages from\",\n args: {\n name: \"root\"\n }\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"auto\", \"always\", \"never\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print cargo log messages\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }],\n args: {\n name: \"SPEC\",\n generators: {\n script: [\"bash\", \"-c\", \"cargo install --list | \\\\grep -E \\\"^[a-zA-Z\\\\-]+\\\\sv\\\" | cut -d ' ' -f 1\"],\n splitOn: \"\\n\"\n },\n isVariadic: !0\n }\n }, {\n name: \"update\",\n icon: \"\\u{1F4E6}\",\n description: \"Update dependencies as recorded in the local lock file\",\n options: [{\n name: [\"-p\", \"--package\"],\n description: \"Package to update\",\n isRepeatable: !0,\n args: {\n name: \"package\",\n isVariadic: !0,\n filterStrategy: \"fuzzy\",\n generators: F\n }\n }, {\n name: \"--precise\",\n description: \"Update a single dependency to exactly PRECISE when used with -p\",\n dependsOn: [\"--package\", \"-p\"],\n args: {\n name: \"precise\"\n }\n }, {\n name: \"--manifest-path\",\n description: \"Path to Cargo.toml\",\n args: {\n name: \"manifest-path\",\n generators: (0, m.filepaths)({\n equals: \"Cargo.toml\"\n })\n }\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"always\", \"never\", \"auto\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n 
description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print cargo log messages\"\n }, {\n name: [\"-w\", \"--workspace\"],\n description: \"Only update the workspace packages\"\n }, {\n name: \"--aggressive\",\n description: \"Force updating all dependencies of SPEC as well when used with -p\"\n }, {\n name: \"--dry-run\",\n description: \"Don't actually write the lockfile\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }]\n }, {\n name: \"vendor\",\n icon: \"\\u{1F4E6}\",\n description: \"Vendor all dependencies for a project locally\",\n options: [{\n name: \"--manifest-path\",\n description: \"Path to Cargo.toml\",\n args: {\n name: \"manifest-path\",\n generators: (0, m.filepaths)({\n equals: \"Cargo.toml\"\n })\n }\n }, {\n name: [\"-s\", \"--sync\"],\n description: \"Additional `Cargo.toml` to sync and vendor\",\n isRepeatable: !0,\n args: {\n name: \"tomls\",\n isVariadic: !0\n }\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"always\", \"never\", \"auto\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print cargo log messages\"\n }, {\n name: \"--no-delete\",\n description: \"Don't delete older crates in the vendor directory\"\n }, {\n name: \"--respect-source-config\",\n description: \"Respect `[source]` config in `.cargo/config`\",\n isRepeatable: !0\n }, {\n name: \"--versioned-dirs\",\n description: \"Always include version in subdir name\"\n }, {\n name: \"--no-merge-sources\"\n }, {\n name: \"--relative-path\"\n }, {\n name: \"--only-git-deps\"\n }, {\n name: \"--disallow-duplicates\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }],\n args: {\n name: \"path\"\n }\n }, {\n name: \"verify-project\",\n icon: \"\\u{1F4E6}\",\n description: \"Check correctness of crate manifest\",\n options: [{\n name: \"--manifest-path\",\n description: \"Path to Cargo.toml\",\n args: {\n name: \"manifest-path\",\n generators: (0, m.filepaths)({\n equals: \"Cargo.toml\"\n })\n }\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"always\", \"never\", \"auto\"]\n }\n }, {\n name: \"--config\",\n description: 
\"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print cargo log messages\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }]\n }, {\n name: \"version\",\n icon: \"\\u{1F4E6}\",\n description: \"Show version information\",\n options: [{\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"always\", \"never\", \"auto\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print cargo log messages\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }]\n }, {\n name: \"yank\",\n icon: \"\\u{1F4E6}\",\n description: \"Remove a pushed crate from the index\",\n options: [{\n name: \"--vers\",\n description: \"The version to yank or un-yank\",\n args: {\n name: \"vers\"\n }\n }, {\n name: \"--index\",\n description: \"Registry index to yank from\",\n args: {\n name: \"index\"\n }\n }, {\n name: \"--token\",\n description: \"API token to use when authenticating\",\n args: {\n name: \"token\"\n }\n }, {\n name: \"--registry\",\n description: \"Registry to use\",\n args: {\n name: \"registry\"\n }\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"always\", \"never\", \"auto\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print cargo log messages\"\n }, {\n name: \"--undo\",\n description: \"Undo a yank, putting a version back into the index\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: 
\"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }],\n args: {\n name: \"crate\"\n }\n }, {\n name: \"help\",\n icon: \"\\u{1F4E6}\",\n description: \"Print this message or the help of the given subcommand(s)\",\n options: [{\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"always\", \"never\", \"auto\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }],\n args: {\n name: \"subcommand\"\n }\n }, {\n name: \"add\",\n icon: \"\\u{1F4E6}\",\n description: \"Add dependencies to a Cargo.toml manifest file\",\n options: [{\n name: \"--no-default-features\",\n description: \"Disable the default features\"\n }, {\n name: \"--default-features\",\n description: \"Re-enable the default features\"\n }, {\n name: [\"-F\", \"--features\"],\n description: \"Space or comma separated list of features to activate\"\n }, {\n name: \"--optional\",\n description: \"Mark the dependency as optional\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output\"\n }, {\n name: \"--no-optional\",\n description: \"Mark the dependency as required\"\n }, {\n name: \"--color\",\n args: {\n name: \"WHEN\",\n suggestions: [\"auto\", \"always\", \"never\"]\n }\n }, {\n name: \"--rename\",\n description: \"Rename the dependency\",\n args: {\n name: \"NAME\"\n }\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--manifest-path\",\n description: \"Path to Cargo.toml\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: [\"-p\", \"--package\"],\n description: \"Package to modify\",\n args: {\n name: \"SPEC\",\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print cargo log messages\"\n }, {\n name: \"--dry-run\",\n description: \"Don't actually write the manifest\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: \"--path\",\n description: \"Filesystem path to local crate to add\",\n args: {\n name: \"PATH\",\n template: \"folders\"\n }\n }, {\n name: \"--git\",\n description: \"Git repository location\",\n args: {\n name: \"URI\"\n }\n }, {\n name: \"--branch\",\n description: \"Git branch to download the crate from\",\n dependsOn: [\"--git\"],\n args: {\n name: \"BRANCH\"\n }\n }, {\n name: \"--tag\",\n description: \"Git tag to download the crate from\",\n dependsOn: [\"--git\"],\n args: {\n name: \"TAG\"\n }\n }, {\n name: \"--rev\",\n description: \"Git reference to download the crate from\",\n dependsOn: [\"--git\"],\n args: {\n name: \"REV\"\n }\n }, {\n name: \"--registry\",\n 
description: \"Package registry for this dependency\",\n args: {\n name: \"NAME\"\n }\n }, {\n name: \"--dev\",\n description: \"Add as development dependency\"\n }, {\n name: \"--build\",\n description: \"Add as build dependency\"\n }, {\n name: \"--target\",\n description: \"Add as dependency to the given target platform\",\n args: {\n name: \"TARGET\",\n filterStrategy: \"fuzzy\",\n generators: b\n }\n }],\n args: {\n name: \"DEP_ID\",\n generators: H,\n filterStrategy: \"fuzzy\",\n debounce: !0,\n isVariadic: !0,\n suggestCurrentToken: !0\n }\n }, {\n name: [\"remove\", \"rm\"],\n icon: \"\\u{1F4E6}\",\n description: \"Remove dependencies from a Cargo.toml manifest file\",\n options: [{\n name: \"--dev\",\n description: \"Remove as development dependency\"\n }, {\n name: \"--build\",\n description: \"Remove as build dependency\"\n }, {\n name: \"--target\",\n description: \"Remove as dependency to the given target platform\",\n args: {\n name: \"TARGET\",\n filterStrategy: \"fuzzy\",\n generators: b\n }\n }, {\n name: [\"-p\", \"--package\"],\n description: \"Package to remove from\",\n args: {\n name: \"SPEC\",\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: \"--manifest-path\",\n description: \"Path to Cargo.toml\"\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print cargo log messages\"\n }, {\n name: \"--dry-run\",\n description: \"Don't actually write the manifest\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output\"\n }, {\n name: \"--color\",\n args: {\n name: \"WHEN\",\n suggestions: [\"auto\", \"always\", \"never\"]\n }\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }],\n args: {\n name: \"DEP_ID\",\n generators: Fe,\n filterStrategy: \"fuzzy\",\n isVariadic: !0\n }\n }],\n options: [{\n name: \"--explain\",\n description: \"Run `rustc --explain CODE`\",\n args: {\n name: \"explain\"\n }\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"color\",\n suggestions: [\"always\", \"never\", \"auto\"]\n }\n }, {\n name: \"--config\",\n description: \"Override a configuration value\",\n isRepeatable: !0,\n args: {\n name: \"config\",\n generators: d\n }\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n isRepeatable: !0,\n args: {\n name: \"unstable-features\"\n }\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-V\", \"--version\"],\n description: \"Print version info and exit\"\n }, {\n name: \"--list\",\n description: \"List installed commands\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\",\n isRepeatable: !0\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print cargo log messages\"\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }],\n generateSpec: function () {\n var _generateSpec = _asyncToGenerator(function* (t, a) {\n var [{\n stdout: i\n }, {\n stdout: n\n }] = yield Promise.all([a({\n 
command: \"rustup\",\n args: [\"toolchain\", \"list\"]\n }), a({\n command: \"cargo\",\n args: [\"--list\"]\n })]),\n o = i.split(\"\\n\").map(r => ({\n icon: \"\\u{1F9F0}\",\n name: \"+\".concat(r.split(\"-\")[0]),\n description: r\n })),\n s = [],\n l = n.split(\"\\n\").filter((r, p) => p != 0).map(r => r.trim().split(/\\s+/, 1)[0]);\n if (l.includes(\"fmt\")) {\n var r = {\n name: \"fmt\",\n icon: \"\\u{1F6E0}\",\n description: \"This utility formats all bin and lib files of the current crate using rustfmt\",\n subcommands: [{\n name: \"--\",\n description: \"All other arguments are passed to rustfmt\",\n args: {\n generators: (0, m.filepaths)({\n extensions: [\"rs\"]\n })\n },\n options: [{\n name: \"--check\",\n description: \"Run in 'check' mode. Exits with 0 if input is formatted correctly. Exits with 1 and prints a diff if formatting is required\"\n }, {\n name: \"--emit\",\n description: \"What data to emit and how\",\n args: {\n suggestions: [\"files\", \"stdout\"]\n }\n }, {\n name: \"--backup\",\n description: \"Backup any modified files\"\n }, {\n name: \"--config-path\",\n description: \"Path for the configuration file\",\n args: {\n generators: (0, m.filepaths)({\n equals: [\"rustfmt.toml\"]\n })\n }\n }, {\n name: \"--edition\",\n description: \"Rust edition to use\",\n args: {\n suggestions: G\n }\n }, {\n name: \"--print-config\",\n description: \"Dumps a default or minimal config to PATH\",\n args: [{\n name: \"verbosity\",\n suggestions: [\"default\", \"minimal\", \"current\"]\n }, {\n name: \"PATH\",\n template: \"filepaths\"\n }]\n }, {\n name: [\"-l\", \"--files-with-diff\"],\n description: \"Prints the names of mismatched files that were formatted\"\n }]\n }],\n options: [{\n name: \"--check\",\n description: \"Run rustfmt in check mode\"\n }, {\n name: \"--all\",\n description: \"Format all packages, and also their local path-based dependencies\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"No output printed to stdout\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output\"\n }, {\n name: \"--version\",\n description: \"Print rustfmt version and exit\"\n }, {\n name: \"--manifest-path\",\n description: \"Specify path to Cargo.toml\",\n args: {\n name: \"manifest-path\",\n generators: (0, m.filepaths)({\n equals: [\"Cargo.toml\"]\n })\n }\n }, {\n name: \"--message-format\",\n description: \"Specify message-format\",\n args: {\n name: \"message-format\",\n suggestions: [\"short\", \"json\", \"human\"]\n }\n }, {\n name: [\"-p\", \"--package\"],\n description: \"Specify package to format\",\n args: {\n name: \"package\",\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }]\n };\n s.push(r);\n }\n if (l.includes(\"clippy\")) {\n var _r = {\n name: \"clippy\",\n icon: \"\\u{1F4CE}\",\n description: \"Runs the Clippy linter\",\n subcommands: [{\n name: \"--\",\n description: \"All other arguments are passed to clippy\",\n options: [{\n name: [\"-W\", \"--warn\"],\n description: \"Set lint warnings\",\n args: {}\n }, {\n name: [\"-A\", \"--allow\"],\n description: \"Set lint allowed\",\n args: {}\n }, {\n name: [\"-D\", \"--deny\"],\n description: \"Set lint denied\",\n args: {}\n }, {\n name: [\"-F\", \"--forbid\"],\n description: \"Set lint forbidden\",\n args: {}\n }]\n }],\n options: [{\n name: \"--no-deps\",\n description: \"Run Clippy only on the given crate, without linting the dependencies\"\n }, {\n name: \"--fix\",\n description: \"Automatically 
apply lint suggestions. This flag implies `--no-deps`\"\n }, {\n name: \"--allow-dirty\",\n description: \"Allow fix to apply even if the working directory is dirty\",\n dependsOn: [\"--fix\"]\n }, {\n name: \"--allow-staged\",\n description: \"Allow fix to apply even if the working directory has staged changes\",\n dependsOn: [\"--fix\"]\n }]\n };\n s.push(_r);\n }\n if (l.includes(\"flamegraph\")) {\n var _r2 = {\n name: \"flamegraph\",\n icon: \"\\u{1F525}\",\n description: \"Generates a flamegraph of the current crate\",\n options: [{\n name: \"--deterministic\",\n description: \"Colors are selected such that the color of a function does not change between runs\"\n }, {\n name: \"--dev\",\n description: \"Build with the dev profile\"\n }, {\n name: [\"-i\", \"--inverted\"],\n description: \"Plot the flame graph up-side-down\"\n }, {\n name: \"--no-default-features\",\n description: \"Disable default features\"\n }, {\n name: \"--open\",\n description: \"Open the output .svg file with default program\"\n }, {\n name: \"--reverse\",\n description: \"Generate stack-reversed flame graph\"\n }, {\n name: \"--root\",\n description: \"Run with root privileges (using `sudo`)\"\n }, {\n name: \"--no-inline\",\n description: \"Disable inlining for perf script because of performance issues\"\n }]\n };\n s.push(_r2);\n }\n if (l.includes(\"audit\")) {\n var _r3 = {\n name: \"audit\",\n icon: \"\\u{1F4DA}\",\n description: \"Runs the cargo audit tool\",\n options: [{\n name: [\"-d\", \"--db\"],\n description: \"Advisory database git repo path\",\n args: {\n name: \"DB\",\n template: \"folders\"\n }\n }, {\n name: [\"-D\", \"--deny\"],\n description: \"Exit with an error on the argument\",\n args: {\n isVariadic: !0,\n suggestions: [{\n name: \"warnings\",\n description: \"Warnings (any)\"\n }, {\n name: \"unmaintained\",\n description: \"Unmaintained crates\"\n }, {\n name: \"unsound\",\n description: \"Unsound Rust code\"\n }, {\n name: \"yanked\",\n description: \"Yanked crates\"\n }]\n }\n }, {\n name: [\"-f\", \"--file\"],\n description: \"Cargo lockfile to inspect\",\n args: {\n suggestions: [{\n name: \"-\",\n description: \"Stdin\"\n }],\n generators: (0, m.filepaths)({\n equals: [\"Cargo.lock\"]\n })\n }\n }, {\n name: [\"-n\", \"--no-fetch\"],\n description: \"Do not perform a git fetch on the advisory DB\"\n }, {\n name: \"--stale\",\n description: \"Allow stale database\"\n }, {\n name: \"--target-arch\",\n description: \"Filter vulnerabilities by CPU\",\n args: {}\n }, {\n name: \"--target-os\",\n description: \"Filter vulnerabilities by OS\",\n args: {}\n }, {\n name: [\"-u\", \"--url\"],\n description: \"URL for advisory database git repo\"\n }, {\n name: \"--json\",\n description: \"Output report in JSON format\"\n }, {\n name: \"--no-local-crates\",\n description: \"Vulnerability querying does not consider local crates\"\n }]\n };\n s.push(_r3);\n }\n if (l.includes(\"outdated\")) {\n var _r4 = {\n name: \"outdated\",\n icon: \"\\u{1F4E6}\",\n description: \"Displays information about project dependency versions\",\n options: [{\n name: [\"-a\", \"--aggressive\"],\n description: \"Ignores channels for latest updates\"\n }, {\n name: \"--color\",\n description: \"Output coloring\",\n args: {\n name: \"COLOR\",\n suggestions: [\"always\", \"never\", \"auto\"],\n default: \"auto\"\n }\n }, {\n name: [\"-d\", \"--depth\"],\n description: \"How deep in the dependency chain to search (Defaults to all dependencies when omitted)\",\n args: {\n name: \"DEPTH\"\n },\n exclusiveOn: [\"-R\", 
\"--root-deps-only\"]\n }, {\n name: [\"-x\", \"--exclude\"],\n description: \"Exclude a dependency from the output\",\n isRequired: !0,\n args: {\n name: \"DEPENDENCY\",\n filterStrategy: \"fuzzy\",\n generators: F\n }\n }, {\n name: \"--exit-code\",\n description: \"The exit code to return on new versions found\",\n args: {\n name: \"NUM\",\n suggestions: [\"0\", \"1\"],\n default: \"0\"\n }\n }, {\n name: \"--features\",\n description: \"Space-separated list of features\",\n args: {\n name: \"FEATURES\",\n generators: w,\n isVariadic: !0\n }\n }, {\n name: \"--format\",\n description: \"Output formatting\",\n args: {\n name: \"FORMAT\",\n suggestions: [\"json\", \"list\"],\n default: \"list\"\n }\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Prints help information\"\n }, {\n name: [\"-i\", \"--ignore\"],\n description: \"Dependencies to not print in the output\",\n args: {\n name: \"DEPENDENCY\",\n filterStrategy: \"fuzzy\",\n generators: F\n }\n }, {\n name: [\"-e\", \"--ignore-external-rel\"],\n description: \"Ignore relative dependencies external to workspace and check root dependencies only\"\n }, {\n name: [\"-m\", \"--manifest-path\"],\n description: \"Path to the Cargo.toml file to use\",\n args: {\n name: \"PATH\",\n generators: (0, m.filepaths)({\n equals: [\"Cargo.toml\"]\n })\n }\n }, {\n name: [\"-o\", \"--offline\"],\n description: \"Run without accessing the network\"\n }, {\n name: [\"-p\", \"--packages\"],\n description: \"Packages to inspect for updates\",\n args: {\n name: \"PACKAGES\",\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Suppresses warnings\"\n }, {\n name: [\"-r\", \"--root\"],\n description: \"Package to treat as the root package\",\n args: {\n name: \"PACKAGE\",\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: [\"-R\", \"--root-deps-only\"],\n description: \"Only check root dependencies\",\n exclusiveOn: [\"-d\", \"--depth\"]\n }, {\n name: [\"-V\", \"--version\"],\n description: \"Prints version information\"\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output\"\n }, {\n name: [\"-w\", \"--workspace\"],\n description: \"Checks updates for all workspace members rather than only the root package\"\n }]\n };\n s.push(_r4);\n }\n if (l.includes(\"udeps\")) {\n var _r5 = {\n name: \"udeps\",\n icon: \"\\u{1F4E6}\",\n description: \"Find unused dependencies in Cargo.toml files\",\n options: [{\n name: [\"-q\", \"--quiet\"],\n description: \"No output printed to stdout\"\n }, {\n name: [\"-p\", \"--package\"],\n description: \"Package(s) to check\",\n args: {\n name: \"SPEC\",\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: \"--all\",\n description: \"Alias for --workspace (deprecated)\",\n hidden: !0,\n deprecated: !0\n }, {\n name: \"--workspace\",\n description: \"Check all packages in the workspace\"\n }, {\n name: \"--exclude\",\n description: \"Exclude packages from the check\",\n args: {\n name: \"SPEC\",\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: [\"-j\", \"--jobs\"],\n description: \"Number of parallel jobs, defaults to # of CPUs\",\n args: {\n name: \"N\"\n }\n }, {\n name: \"--lib\",\n description: \"Check only this package's library\"\n }, {\n name: \"--bin\",\n description: \"Check only the specified binary\",\n args: {\n name: \"NAME\",\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"bin\"\n })\n }\n }, {\n name: \"--bins\",\n description: \"Check all binaries\"\n }, {\n name: \"--example\",\n 
description: \"Check only the specified example\",\n args: {\n name: \"NAME\",\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"example\"\n })\n }\n }, {\n name: \"--examples\",\n description: \"Check all examples\"\n }, {\n name: \"--test\",\n description: \"Check only the specified test target\",\n args: {\n name: \"NAME\",\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"test\"\n })\n }\n }, {\n name: \"--tests\",\n description: \"Check all tests\"\n }, {\n name: \"--bench\",\n description: \"Check only the specified bench target\",\n args: {\n name: \"NAME\",\n filterStrategy: \"fuzzy\",\n generators: u({\n kind: \"bench\"\n })\n }\n }, {\n name: \"--benches\",\n description: \"Check all benches\"\n }, {\n name: \"--all-targets\",\n description: \"Check all targets\"\n }, {\n name: \"--release\",\n description: \"Check artifacts in release mode, with optimizations\"\n }, {\n name: \"--profile\",\n description: \"Check artifacts with the specified profile\",\n args: {\n name: \"PROFILE-NAME\"\n }\n }, {\n name: \"--features\",\n description: \"Space-separated list of features to activate\",\n args: {\n name: \"FEATURES\",\n isVariadic: !0\n }\n }, {\n name: \"--all-features\",\n description: \"Activate all available features\"\n }, {\n name: \"--no-default-features\",\n description: \"Do not activate the `default` feature\"\n }, {\n name: \"--target\",\n description: \"Check for the target triple\",\n args: {\n name: \"TRIPLE\"\n }\n }, {\n name: \"--target-dir\",\n description: \"Directory for all generated artifacts\",\n args: {\n name: \"DIRECTORY\"\n }\n }, {\n name: \"--manifest-path\",\n description: \"Path to Cargo.toml\",\n args: {\n name: \"PATH\"\n }\n }, {\n name: \"--message-format\",\n description: \"Error format\",\n args: {\n name: \"FMT\",\n default: \"human\",\n suggestions: [\"human\", \"json\", \"short\"]\n }\n }, {\n name: [\"-v\", \"--verbose\"],\n description: \"Use verbose output (-vv very verbose/build.rs output)\"\n }, {\n name: \"--color\",\n description: \"Coloring\",\n args: {\n name: \"WHEN\",\n suggestions: [\"auto\", \"always\", \"never\"]\n }\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network\"\n }, {\n name: \"--output\",\n description: \"Output format\",\n args: {\n name: \"OUTPUT\",\n default: \"human\",\n suggestions: [\"human\", \"json\"]\n }\n }, {\n name: \"--backend\",\n description: \"Backend to use for determining unused deps\",\n args: {\n name: \"BACKEND\",\n suggestions: [\"save-analysis\", \"depinfo\"]\n }\n }, {\n name: \"--keep-going\",\n description: \"Needed because the keep-going flag is asked about by cargo code\"\n }, {\n name: \"--show-unused-transitive\",\n description: \"Show unused dependencies that get used transitively by main dependencies. 
Works only with 'save-analysis' backend\",\n dependsOn: [\"--backend\"]\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-V\", \"--version\"],\n description: \"Print version information\"\n }]\n };\n s.push(_r5);\n }\n if (l.includes(\"deny\")) {\n var _r6 = {\n name: \"deny\",\n icon: \"\\u274C\",\n description: \"Cargo plugin to help you manage large dependency graphs\",\n subcommands: [{\n name: \"check\",\n description: \"Checks a project's crate graph\",\n options: [{\n name: \"--audit-compatible-output\",\n description: \"To ease transition from cargo-audit to cargo-deny, this flag will tell cargo-deny to output the exact same output as cargo-audit would, to `stdout` instead of `stderr`, just as with cargo-audit\"\n }, {\n name: [\"-c\", \"--config\"],\n description: \"Path to the config to use. Defaults to /deny.toml if not specified\",\n args: {\n name: \"CONFIG\",\n generators: (0, m.filepaths)({\n equals: \"deny.toml\"\n })\n }\n }, {\n name: [\"-d\", \"--disable-fetch\"],\n description: \"Disable fetching of the advisory database\"\n }, {\n name: [\"-g\", \"--graph\"],\n description: \"Path to graph_output root directory\",\n args: {\n name: \"GRAPH\",\n template: \"folders\"\n }\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: \"--hide-inclusion-graph\",\n description: \"Hides the inclusion graph when printing out info for a crate\"\n }, {\n name: [\"-s\", \"--show-stats\"],\n description: \"Show stats for all the checks, regardless of the log-level\"\n }],\n args: {\n name: \"WHICH\",\n isOptional: !0,\n suggestions: [{\n name: \"advisories\",\n description: \"Checks for known security vulnerabilities\"\n }, {\n name: \"ban\",\n description: \"Checks for banned crates\"\n }, {\n name: \"bans\",\n description: \"Checks for banned crates\"\n }, {\n name: \"license\",\n description: \"Checks for crates with unknown licenses\"\n }, {\n name: \"licenses\",\n description: \"Checks for crates with unknown licenses\"\n }, {\n name: \"sources\",\n description: \"Checks for crates with unknown sources\"\n }, {\n name: \"all\",\n description: \"Runs all checks\"\n }],\n isVariadic: !0\n }\n }, {\n name: \"fetch\",\n description: \"Fetches remote data\",\n options: [{\n name: [\"-c\", \"--config\"],\n description: \"Path to the config to use\",\n args: {\n name: \"CONFIG\",\n generators: (0, m.filepaths)({\n equals: \"deny.toml\"\n })\n }\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }],\n args: {\n name: \"SOURCES\",\n isOptional: !0,\n suggestions: [{\n name: \"db\",\n description: \"Fetches the advisory database\"\n }, {\n name: \"index\",\n description: \"Fetches the crates.io index\"\n }, {\n name: \"all\",\n description: \"Fetches all remote data\"\n }]\n }\n }, {\n name: \"help\",\n description: \"Print this message or the help of the given subcommand(s)\",\n args: {\n template: \"help\",\n isOptional: !0\n }\n }, {\n name: \"init\",\n description: \"Creates a cargo-deny config from a template\",\n options: [{\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }],\n args: {\n name: \"CONFIG\",\n description: \"The path to create\",\n generators: (0, m.filepaths)({\n equals: \"deny.toml\"\n })\n }\n }, {\n name: \"list\",\n description: \"Outputs a listing of all licenses and the crates that use them\",\n options: [{\n name: [\"-c\", \"--config\"],\n description: \"Path to the config to use\",\n args: {\n name: \"CONFIG\",\n 
generators: (0, m.filepaths)({\n equals: \"deny.toml\"\n })\n }\n }, {\n name: [\"-f\", \"--format\"],\n description: \"The format of the output\",\n args: {\n name: \"FORMAT\",\n suggestions: [\"human\", \"json\", \"tsv\"]\n }\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-l\", \"--layout\"],\n description: \"The layout for the output\",\n args: {\n name: \"LAYOUT\",\n suggestions: [{\n name: \"crate\"\n }, {\n name: \"license\"\n }]\n }\n }, {\n name: [\"-t\", \"--threshold\"],\n description: \"Minimum confidence threshold for license text\",\n args: {\n name: \"THRESHOLD\",\n suggestions: [\"0.0\", \"0.1\", \"0.2\", \"0.3\", \"0.4\", \"0.5\", \"0.6\", \"0.7\", \"0.8\", \"0.9\", \"1.0\"]\n }\n }]\n }],\n options: [{\n name: \"--all-features\",\n description: \"Activate all available features\"\n }, {\n name: [\"-c\", \"--color\"],\n description: \"Coloring\",\n args: {\n name: \"WHEN\",\n suggestions: [\"auto\", \"always\", \"never\"]\n }\n }, {\n name: \"--exclude\",\n description: \"One or more crates to exclude from the crate graph that is used\",\n args: {\n name: \"EXCLUDE\",\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: [\"-f\", \"--format\"],\n description: \"Specify the format of cargo-deny's output\",\n args: {\n name: \"FORMAT\",\n default: \"human\",\n suggestions: [\"human\", \"json\"]\n }\n }, {\n name: \"--features\",\n description: \"Space or comma separated list of features to activate\",\n args: {\n name: \"FEATURES\",\n isVariadic: !0\n }\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-L\", \"--log-level\"],\n description: \"The log level for messages\",\n args: {\n name: \"LOG_LEVEL\",\n default: \"warn\",\n suggestions: [\"off\", \"error\", \"warn\", \"info\", \"debug\", \"trace\"]\n }\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"--manifest-path\",\n description: \"The path of a Cargo.toml to use as the context for the operation\",\n args: {\n name: \"MANIFEST_PATH\"\n }\n }, {\n name: \"--no-default-features\",\n description: \"Do not activate the `default` feature\"\n }, {\n name: \"--offline\",\n description: \"Run without accessing the network. 
If used with the `check` subcommand, this also disables advisory database fetching\"\n }, {\n name: [\"-t\", \"--target\"],\n description: \"One or more platforms to filter crates by\",\n args: {\n name: \"TARGET\"\n }\n }, {\n name: [\"-V\", \"--version\"],\n description: \"Print version information\"\n }, {\n name: \"--workspace\",\n description: \"If passed, all workspace packages are used as roots for the crate graph\"\n }]\n };\n s.push(_r6);\n }\n if (l.includes(\"bloat\")) {\n var _r7 = {\n name: \"bloat\",\n icon: \"\\u2696\\uFE0F\",\n description: \"Find out what takes most of the space in your executable\",\n options: [{\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-V\", \"--version\"],\n description: \"Print version information\"\n }, {\n name: \"--lib\",\n description: \"Build only this package's library\"\n }, {\n name: \"--bin\",\n description: \"Build only the specified binary\",\n args: {\n name: \"NAME\"\n }\n }, {\n name: \"--example\",\n description: \"Build only the specified example\",\n args: {\n name: \"NAME\"\n }\n }, {\n name: \"--test\",\n description: \"Build only the specified test target\",\n args: {\n name: \"NAME\"\n }\n }, {\n name: [\"-p\", \"--package\"],\n description: \"Package to build\",\n args: {\n name: \"SPEC\",\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: \"--release\",\n description: \"Build artifacts in release mode, with optimizations\"\n }, {\n name: [\"-j\", \"--jobs\"],\n description: \"Number of parallel jobs, defaults to # of CPUs\",\n args: {\n name: \"N\"\n }\n }, {\n name: \"--features\",\n description: \"Space-separated list of features to activate\",\n args: {\n name: \"FEATURES\"\n }\n }, {\n name: \"--all-features\",\n description: \"Activate all available features\"\n }, {\n name: \"--no-default-features\",\n description: \"Do not activate the `default` feature\"\n }, {\n name: \"--profile\",\n description: \"Build with the given profile\",\n args: {\n name: \"PROFILE\"\n }\n }, {\n name: \"--target\",\n description: \"Build for the target triple\",\n args: {\n name: \"TARGET\"\n }\n }, {\n name: \"--target-dir\",\n description: \"Directory for all generated artifacts\",\n args: {\n name: \"DIRECTORY\"\n }\n }, {\n name: \"--frozen\",\n description: \"Require Cargo.lock and cache are up to date\"\n }, {\n name: \"--locked\",\n description: \"Require Cargo.lock is up to date\"\n }, {\n name: \"-Z\",\n description: \"Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details\",\n args: {\n name: \"FLAG\",\n isVariadic: !0\n }\n }, {\n name: \"--crates\",\n description: \"Per crate bloatedness\"\n }, {\n name: \"--time\",\n description: \"Per crate build time. 
Will run `cargo clean` first\"\n }, {\n name: \"--filter\",\n description: \"Filter functions by crate\",\n args: {\n name: \"CRATE|REGEXP\"\n }\n }, {\n name: \"--split-std\",\n description: \"Split the 'std' crate to original crates like core, alloc, etc\"\n }, {\n name: \"--symbols-section\",\n description: \"Use custom symbols section (ELF-only)\",\n args: {\n name: \"NAME\",\n default: \".text\"\n }\n }, {\n name: \"--no-relative-size\",\n description: \"Hide 'File' and '.text' columns\"\n }, {\n name: \"--full-fn\",\n description: \"Print full function name with hash values\"\n }, {\n name: \"-n\",\n description: \"Number of lines to show, 0 to show all [default: 20]\",\n args: {\n name: \"NUM\",\n default: \"20\"\n }\n }, {\n name: [\"-w\", \"--wide\"],\n description: \"Do not trim long function names\"\n }, {\n name: \"--message-format\",\n description: \"Output format\",\n args: {\n name: \"FMT\",\n default: \"table\",\n suggestions: [\"table\", \"json\"]\n }\n }]\n };\n s.push(_r7);\n }\n if (l.includes(\"sort\")) {\n var _r8 = {\n name: \"sort\",\n icon: \"\\u{1F6E0}\",\n description: \"Ensure Cargo.toml dependency tables are sorted\",\n options: [{\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-V\", \"--version\"],\n description: \"Print version information\"\n }, {\n name: [\"-c\", \"--check\"],\n description: \"Non-zero exit if Cargo.toml is unsorted, overrides default behavior\"\n }, {\n name: [\"-g\", \"--grouped\"],\n description: \"When sorting groups of key value pairs blank lines are kept\"\n }, {\n name: [\"-p\", \"--print\"],\n description: \"Prints Cargo.toml, lexically sorted, to stdout\"\n }, {\n name: [\"-w\", \"--workspace\"],\n description: \"Checks every crate in a workspace\"\n }, {\n name: [\"-n\", \"--no-format\"],\n description: \"Skip formatting after sorting\",\n args: {\n name: \"no-format\"\n }\n }, {\n name: [\"-o\", \"--order\"],\n description: \"When sorting groups of key value pairs blank lines are kept\",\n args: {\n name: \"order\"\n }\n }],\n args: {\n name: \"CWD\",\n description: \"The directory to run the command in\",\n isOptional: !0,\n template: \"folders\"\n }\n };\n s.push(_r8);\n }\n if (l.includes(\"fuzz\")) {\n var _r9 = {\n name: \"fuzz\",\n icon: \"\\u{1F6E0}\",\n description: \"A `cargo` subcommand for fuzzing with `libFuzzer`!\",\n subcommands: [{\n name: \"add\",\n description: \"Add a new fuzz target\"\n }, {\n name: \"build\",\n description: \"Build fuzz targets\"\n }, {\n name: \"cmin\",\n description: \"Minify a corpus\"\n }, {\n name: \"coverage\",\n description: \"Run program on the generated corpus and generate coverage information\"\n }, {\n name: \"fmt\",\n description: \"Print the `std::fmt::Debug` output for an input\"\n }, {\n name: \"help\",\n description: \"Prints this message or the help of the given subcommand(s)\"\n }, {\n name: \"init\",\n description: \"Initialize the fuzz directory\"\n }, {\n name: \"list\",\n description: \"List all the existing fuzz targets\"\n }, {\n name: \"run\",\n description: \"Run a fuzz target\"\n }, {\n name: \"tmin\",\n description: \"Minify a test case\"\n }]\n };\n s.push(_r9);\n }\n if (l.includes(\"insta\")) {\n var _r10 = [{\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-V\", \"--version\"],\n description: \"Print version information\"\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"WHEN\",\n default: \"auto\",\n suggestions: [\"auto\", 
\"always\", \"never\"]\n }\n }, {\n name: \"--manifest-path\",\n description: \"Path to Cargo.toml\",\n args: {\n name: \"PATH\",\n generators: (0, m.filepaths)({\n equals: \"Cargo.toml\"\n })\n }\n }, {\n name: \"--workspace-root\",\n description: \"Explicit path to the workspace root\",\n args: {\n name: \"PATH\",\n template: \"folders\"\n }\n }, {\n name: [\"-e\", \"--extensions\"],\n description: \"Sets the extensions to consider. Defaults to `.snap`\",\n args: {\n name: \"EXTENSIONS\",\n isVariadic: !0\n }\n }, {\n name: \"--all\",\n description: \"Work on all packages in the workspace\"\n }, {\n name: \"--no-ignore\",\n description: \"Also walk into ignored paths\"\n }],\n p = {\n name: \"insta\",\n icon: \"\\u{1F6E0}\",\n description: \"A `cargo` subcommand for snapshot testing\",\n subcommands: [{\n name: \"review\",\n description: \"Interactively review snapshots\",\n options: [..._r10, {\n name: \"--snapshot\",\n description: \"Limits the operation to one or more snapshots\",\n args: {\n name: \"snapshot-filter\",\n isVariadic: !0\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print to stdout\"\n }]\n }, {\n name: \"reject\",\n description: \"Rejects all snapshots\",\n options: [..._r10, {\n name: \"--snapshot\",\n description: \"Limits the operation to one or more snapshots\",\n args: {\n name: \"snapshot-filter\",\n isVariadic: !0\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print to stdout\"\n }]\n }, {\n name: \"accept\",\n description: \"Accepts all snapshots\",\n options: [..._r10, {\n name: \"--snapshot\",\n description: \"Limits the operation to one or more snapshots\",\n args: {\n name: \"snapshot-filter\",\n isVariadic: !0\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print to stdout\"\n }]\n }, {\n name: \"test\",\n description: \"Run tests and then reviews\",\n options: [..._r10, {\n name: \"--snapshot\",\n description: \"Limits the operation to one or more snapshots\",\n args: {\n name: \"snapshot-filter\",\n isVariadic: !0\n }\n }, {\n name: [\"-q\", \"--quiet\"],\n description: \"Do not print to stdout\"\n }, {\n name: [\"-p\", \"--package\"],\n description: \"Package to run tests for\",\n args: {\n name: \"SPEC\",\n filterStrategy: \"fuzzy\",\n generators: f\n }\n }, {\n name: \"--no-force-pass\",\n description: \"Disable force-passing of snapshot tests\"\n }, {\n name: \"--fail-fast\",\n description: \"Prevent running all tests regardless of failure\"\n }, {\n name: \"--features\",\n description: \"Space-separated list of features to activate\",\n args: {\n name: \"features\"\n }\n }, {\n name: [\"-j\", \"--jobs\"],\n description: \"Number of parallel jobs, defaults to # of CPUs\",\n args: {\n name: \"jobs\"\n }\n }, {\n name: \"--release\",\n description: \"Build artifacts in release mode, with optimizations\"\n }, {\n name: \"--all-features\",\n description: \"Activate all available features\"\n }, {\n name: \"--no-default-features\",\n description: \"Do not activate the `default` feature\"\n }, {\n name: \"--review\",\n description: \"Follow up with review\"\n }, {\n name: \"--accept\",\n description: \"Accept all snapshots after test\"\n }, {\n name: \"--accept-unseen\",\n description: \"Accept all new (previously unseen)\"\n }, {\n name: \"--keep-pending\",\n description: \"Do not reject pending snapshots before run\"\n }, {\n name: \"--force-update-snapshots\",\n description: \"Update all snapshots even if they are still matching\"\n }, {\n name: \"--delete-unreferenced-snapshots\",\n description: 
\"Delete unreferenced snapshots after the test run\"\n }, {\n name: \"--glob-filter\",\n description: \"Filters to apply to the insta glob feature\",\n args: {\n name: \"glob-filter\",\n isVariadic: !0\n }\n }, {\n name: [\"-Q\", \"--no-quiet\"],\n description: \"Do not pass the quiet flag (`-q`) to tests\"\n }, {\n name: \"--test-runner\",\n description: \"Picks the test runner\",\n args: {\n name: \"test-runner\"\n }\n }]\n }, {\n name: \"pending\",\n description: \"Print a summary of all pending snapshots\",\n options: [..._r10, {\n name: \"--as-json\",\n description: \"Changes the output from human readable to JSON\"\n }]\n }, {\n name: \"show\",\n description: \"Shows a specific snapshot\",\n options: _r10,\n args: {\n name: \"path\",\n description: \"The path to the snapshot file\",\n generators: (0, m.filepaths)({\n extensions: [\"snap\"]\n })\n }\n }],\n options: [{\n name: [\"-h\", \"--help\"],\n description: \"Print help information\"\n }, {\n name: [\"-V\", \"--version\"],\n description: \"Print version information\"\n }, {\n name: \"--color\",\n description: \"Coloring: auto, always, never\",\n args: {\n name: \"WHEN\",\n default: \"auto\",\n suggestions: [\"auto\", \"always\", \"never\"]\n }\n }]\n };\n s.push(p);\n }\n if (l.includes(\"make\")) {\n var _r11 = {\n name: \"make\",\n icon: \"\\u{1F6E0}\",\n description: \"Rust cargo-make task runner and build tool\",\n args: {\n name: \"TASK\",\n filterStrategy: \"fuzzy\",\n isVariadic: !0,\n isOptional: !0,\n generators: se\n },\n options: [{\n name: [\"--help\", \"-h\"],\n description: \"Print help information\"\n }, {\n name: [\"--version\", \"-V\"],\n description: \"Print version information\"\n }, {\n name: \"--makefile\",\n description: \"The optional toml file containing the tasks definitions\",\n args: {\n name: \"FILE\",\n template: \"filepaths\"\n }\n }, {\n name: [\"--task\", \"-t\"],\n description: \"The task name to execute\",\n args: {\n name: \"TASK\",\n filterStrategy: \"fuzzy\",\n isVariadic: !0,\n isOptional: !0,\n generators: se\n }\n }, {\n name: [\"--profile\", \"-p\"],\n description: \"The profile name\",\n args: {\n name: \"PROFILE\",\n default: \"development\"\n }\n }, {\n name: \"--cwd\",\n description: \"Set the current working directory\",\n args: {\n name: \"DIRECTORY\",\n template: \"folders\"\n }\n }, {\n name: \"--no-workspace\",\n description: \"Disable workspace support\"\n }, {\n name: \"--no-on-error\",\n description: \"Disable on error flow even if defined in config sections\"\n }, {\n name: \"--allow-private\",\n description: \"Allow invocation of private tasks\"\n }, {\n name: \"--skip-init-end-tasks\",\n description: \"If set, init and end tasks are skipped\"\n }, {\n name: \"--skip-tasks\",\n description: \"Skip all tasks that match the provided regex\",\n args: {\n name: \"SKIP_TASK_PATTERNS\"\n }\n }, {\n name: \"--env-file\",\n description: \"Set environment variables from provided file\",\n args: {\n name: \"FILE\",\n template: \"filepaths\"\n }\n }, {\n name: [\"--env\", \"-e\"],\n description: \"Set environment variables\",\n args: {\n name: \"ENV\"\n }\n }, {\n name: [\"--loglevel\", \"-l\"],\n description: \"The log level\",\n args: {\n name: \"LOG LEVEL\",\n suggestions: [\"verbose\", \"info\", \"error\", \"off\"]\n }\n }, {\n name: [\"--verbose\", \"-v\"],\n description: \"Sets the log level to verbose\"\n }, {\n name: \"--quiet\",\n description: \"Sets the log level to error\"\n }, {\n name: \"--silent\",\n description: \"Sets the log level to off\"\n }, {\n name: 
\"--no-color\",\n description: \"Disables colorful output\"\n }, {\n name: \"--time-summary\",\n description: \"Print task level time summary at end of flow\"\n }, {\n name: \"--experimental\",\n description: \"Allows access to unsupported experimental predefined tasks\"\n }, {\n name: \"--disable-check-for-updates\",\n description: \"Disables the update check during startup\"\n }, {\n name: \"--output-format\",\n description: \"The print/list steps format\",\n args: {\n name: \"OUTPUT FORMAT\",\n suggestions: [\"default\", \"short-description\", \"markdown\", \"markdown-single-page\", \"markdown-sub-section\", \"autocomplete\"]\n }\n }, {\n name: \"--output-file\",\n description: \"The list steps output file name\",\n args: {\n name: \"OUTPUT_FILE\",\n template: \"filepaths\"\n }\n }, {\n name: \"--hide-uninteresting\",\n description: \"Hide any minor tasks such as pre/post hooks\"\n }, {\n name: \"--print-steps\",\n description: \"Only prints the steps of the build in the order they will be invoked but without invoking them\"\n }, {\n name: \"--list-all-steps\",\n description: \"Lists all known steps\"\n }, {\n name: \"--list-category-steps\",\n description: \"List steps for a given category\",\n args: {\n name: \"CATEGORY\"\n }\n }, {\n name: \"--diff-steps\",\n description: \"Runs diff between custom flow and prebuilt flow (requires git)\"\n }]\n };\n s.push(_r11);\n }\n return {\n name: \"cargo\",\n subcommands: s,\n options: o\n };\n });\n function generateSpec(_x10, _x11) {\n return _generateSpec.apply(this, arguments);\n }\n return generateSpec;\n }()\n };\n },\n Me = je();\n\n\n//# sourceURL=webpack://SuggestionsBundle/./node_modules/@withfig/autocomplete/build/cargo.js?"); + +/***/ }), + +/***/ "./node_modules/@withfig/autocomplete/build/cat.js": +/*!*********************************************************!*\ + !*** ./node_modules/@withfig/autocomplete/build/cat.js ***! + \*********************************************************/ +/***/ ((__unused_webpack___webpack_module__, __webpack_exports__, __webpack_require__) => { + +eval("__webpack_require__.r(__webpack_exports__);\n/* harmony export */ __webpack_require__.d(__webpack_exports__, {\n/* harmony export */ \"default\": () => (/* binding */ t)\n/* harmony export */ });\nvar e = {\n name: \"cat\",\n description: \"Concatenate and print files\",\n args: {\n isVariadic: !0,\n template: \"filepaths\"\n },\n options: [{\n name: \"-b\",\n description: \"Number the non-blank output lines, starting at 1\"\n }, {\n name: \"-e\",\n description: \"Display non-printing characters (see the -v option), and display a dollar sign (\\u2018$\\u2019) at the end of each line\"\n }, {\n name: \"-l\",\n description: \"Set an exclusive advisory lock on the standard output file descriptor. This lock is set using fcntl(2) with the F_SETLKW command. If the output file is already locked, cat will block until the lock is acquired\"\n }, {\n name: \"-n\",\n description: \"Number the output lines, starting at 1\"\n }, {\n name: \"-s\",\n description: \"Squeeze multiple adjacent empty lines, causing the output to be single spaced\"\n }, {\n name: \"-t\",\n description: \"Display non-printing characters (see the -v option), and display tab characters as \\u2018^I\\u2019\"\n }, {\n name: \"-u\",\n description: \"Disable output buffering\"\n }, {\n name: \"-v\",\n description: \"Display non-printing characters so they are visible. Control characters print as \\u2018^X\\u2019 for control-X; the delete character (octal 0177) prints as \\u2018^?\\u2019. 
Non-ASCII characters (with the high bit set) are printed as \\u2018M-\\u2019 (for meta) followed by the character for the low 7 bits\"\n }]\n },\n t = e;\n\n\n//# sourceURL=webpack://SuggestionsBundle/./node_modules/@withfig/autocomplete/build/cat.js?"); + +/***/ }), + +/***/ "./node_modules/@withfig/autocomplete/build/cd.js": +/*!********************************************************!*\ + !*** ./node_modules/@withfig/autocomplete/build/cd.js ***! + \********************************************************/ +/***/ ((__unused_webpack___webpack_module__, __webpack_exports__, __webpack_require__) => { + +eval("__webpack_require__.r(__webpack_exports__);\n/* harmony export */ __webpack_require__.d(__webpack_exports__, {\n/* harmony export */ \"default\": () => (/* binding */ Fe)\n/* harmony export */ });\nvar U = Object.create;\nvar W = Object.defineProperty;\nvar Y = Object.getOwnPropertyDescriptor;\nvar ee = Object.getOwnPropertyNames;\nvar te = Object.getPrototypeOf,\n ne = Object.prototype.hasOwnProperty;\nvar j = (e, t) => () => (t || e((t = {\n exports: {}\n}).exports, t), t.exports);\nvar re = (e, t, n, r) => {\n if (t && typeof t == \"object\" || typeof t == \"function\") {\n var _loop = function _loop(i) {\n !ne.call(e, i) && i !== n && W(e, i, {\n get: () => t[i],\n enumerable: !(r = Y(t, i)) || r.enumerable\n });\n };\n for (var i of ee(t)) {\n _loop(i);\n }\n }\n return e;\n};\nvar ie = (e, t, n) => (n = e != null ? U(te(e)) : {}, re(t || !e || !e.__esModule ? W(n, \"default\", {\n value: e,\n enumerable: !0\n}) : n, e));\nvar z = j(I => {\n \"use strict\";\n\n Object.defineProperty(I, \"__esModule\", {\n value: !0\n });\n I.shellExpand = I.ensureTrailingSlash = void 0;\n var se = e => e.endsWith(\"/\") ? e : \"\".concat(e, \"/\");\n I.ensureTrailingSlash = se;\n var oe = (e, t) => e.startsWith(\"~\") && (e.length === 1 || e.charAt(1) === \"/\") ? e.replace(\"~\", t) : e,\n le = (e, t) => e.replace(/\\$([A-Za-z0-9_]+)/g, i => {\n var s;\n var l = i.slice(1);\n return (s = t[l]) !== null && s !== void 0 ? s : i;\n }).replace(/\\$\\{([A-Za-z0-9_]+)(?::-([^}]+))?\\}/g, (i, s, l) => {\n var c, a;\n return (a = (c = t[s]) !== null && c !== void 0 ? c : l) !== null && a !== void 0 ? a : i;\n }),\n ue = (e, t) => {\n var n;\n var {\n environmentVariables: r\n } = t;\n return le(oe(e, (n = r === null || r === void 0 ? void 0 : r.HOME) !== null && n !== void 0 ? n : \"~\"), r);\n };\n I.shellExpand = ue;\n});\nvar $ = j(g => {\n \"use strict\";\n\n var ce = g && g.__awaiter || function (e, t, n, r) {\n function i(s) {\n return s instanceof n ? s : new n(function (l) {\n l(s);\n });\n }\n return new (n || (n = Promise))(function (s, l) {\n function c(o) {\n try {\n u(r.next(o));\n } catch (f) {\n l(f);\n }\n }\n function a(o) {\n try {\n u(r.throw(o));\n } catch (f) {\n l(f);\n }\n }\n function u(o) {\n o.done ? s(o.value) : i(o.value).then(c, a);\n }\n u((r = r.apply(e, t || [])).next());\n });\n };\n Object.defineProperty(g, \"__esModule\", {\n value: !0\n });\n g.filepaths = g.folders = g.getCurrentInsertedDirectory = g.sortFilesAlphabetically = void 0;\n var C = z();\n function R(e) {\n var t = arguments.length > 1 && arguments[1] !== undefined ? 
arguments[1] : [];\n var n = t.map(i => i.toLowerCase()),\n r = e.filter(i => !n.includes(i.toLowerCase()));\n return [...r.filter(i => !i.startsWith(\".\")).sort((i, s) => i.localeCompare(s)), ...r.filter(i => i.startsWith(\".\")).sort((i, s) => i.localeCompare(s)), \"../\"];\n }\n g.sortFilesAlphabetically = R;\n var ae = (e, t, n) => {\n if (e === null) return \"/\";\n var r = (0, C.shellExpand)(t, n),\n i = r.slice(0, r.lastIndexOf(\"/\") + 1);\n return i === \"\" ? (0, C.ensureTrailingSlash)(e) : i.startsWith(\"/\") ? i : \"\".concat((0, C.ensureTrailingSlash)(e)).concat(i);\n };\n g.getCurrentInsertedDirectory = ae;\n function L() {\n var e = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n var {\n extensions: t = [],\n equals: n = [],\n matches: r,\n filterFolders: i = !1,\n editFileSuggestions: s,\n editFolderSuggestions: l,\n rootDirectory: c,\n showFolders: a = \"always\"\n } = e,\n u = new Set(t),\n o = new Set(n),\n f = () => t.length > 0 || n.length > 0 || r,\n p = function p() {\n var h = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : [];\n return f() ? h.filter(_ref => {\n var {\n name: v = \"\",\n type: b\n } = _ref;\n if (!i && b === \"folder\" || o.has(v) || r && v.match(r)) return !0;\n var [, ...d] = v.split(\".\");\n if (d.length >= 1) {\n var O = d.length - 1,\n m = d[O];\n do {\n if (u.has(m)) return !0;\n O -= 1, m = [d[O], m].join(\".\");\n } while (O >= 0);\n }\n return !1;\n }) : h;\n },\n S = function S() {\n var h = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : [];\n return !s && !l ? h : h.map(v => Object.assign(Object.assign({}, v), (v.type === \"file\" ? s : l) || {}));\n };\n return {\n trigger: (h, v) => {\n var b = h.lastIndexOf(\"/\"),\n d = v.lastIndexOf(\"/\");\n return b !== d ? !0 : b === -1 && d === -1 ? !1 : h.slice(0, b) !== v.slice(0, d);\n },\n getQueryTerm: h => h.slice(h.lastIndexOf(\"/\") + 1),\n custom: (h, v, b) => ce(this, void 0, void 0, function* () {\n var d;\n var {\n isDangerous: O,\n currentWorkingDirectory: m,\n searchTerm: y\n } = b,\n w = (d = (0, g.getCurrentInsertedDirectory)(c !== null && c !== void 0 ? c : m, y, b)) !== null && d !== void 0 ? d : \"/\";\n try {\n var N = yield v({\n command: \"ls\",\n args: [\"-1ApL\"],\n cwd: w\n }),\n Z = R(N.stdout.split(\"\\n\"), [\".DS_Store\"]),\n P = [];\n for (var V of Z) if (V) {\n var F = V.endsWith(\"/\") ? \"folders\" : \"filepaths\";\n (F === \"filepaths\" && a !== \"only\" || F === \"folders\" && a !== \"never\") && P.push({\n type: F === \"filepaths\" ? \"file\" : \"folder\",\n name: V,\n insertValue: V,\n isDangerous: O,\n context: {\n templateType: F\n }\n });\n }\n return S(p(P));\n } catch (_unused) {\n return [];\n }\n })\n };\n }\n g.folders = Object.assign(() => L({\n showFolders: \"only\"\n }), Object.freeze(L({\n showFolders: \"only\"\n })));\n g.filepaths = Object.assign(L, Object.freeze(L()));\n});\nvar k = j(x => {\n \"use strict\";\n\n var A = x && x.__awaiter || function (e, t, n, r) {\n function i(s) {\n return s instanceof n ? s : new n(function (l) {\n l(s);\n });\n }\n return new (n || (n = Promise))(function (s, l) {\n function c(o) {\n try {\n u(r.next(o));\n } catch (f) {\n l(f);\n }\n }\n function a(o) {\n try {\n u(r.throw(o));\n } catch (f) {\n l(f);\n }\n }\n function u(o) {\n o.done ? 
s(o.value) : i(o.value).then(c, a);\n }\n u((r = r.apply(e, t || [])).next());\n });\n };\n Object.defineProperty(x, \"__esModule\", {\n value: !0\n });\n x.keyValueList = x.keyValue = x.valueList = void 0;\n var K = new Map();\n function D(e, t) {\n return e.length === 0 ? t : t.map(n => n.insertValue ? n : Object.assign(Object.assign({}, n), {\n insertValue: n.name + e\n }));\n }\n function J(e, t, n) {\n return A(this, void 0, void 0, function* () {\n if (typeof e == \"function\") {\n var r = yield e(...n);\n return D(t, r);\n }\n if (typeof e[0] == \"string\") {\n var _r = e.map(i => ({\n name: i\n }));\n return D(t, _r);\n }\n return D(t, e);\n });\n }\n function q(e, t, n, r) {\n return A(this, void 0, void 0, function* () {\n if (n || Array.isArray(e)) {\n var i = K.get(e);\n return i === void 0 && (i = yield J(e, t, r), K.set(e, i)), i;\n }\n return J(e, t, r);\n });\n }\n function Q(e, t) {\n return typeof t == \"string\" ? e && t === \"keys\" || !e && t === \"values\" : t;\n }\n function M(e) {\n for (var _len = arguments.length, t = new Array(_len > 1 ? _len - 1 : 0), _key = 1; _key < _len; _key++) {\n t[_key - 1] = arguments[_key];\n }\n return Math.max(...t.map(n => e.lastIndexOf(n)));\n }\n function E(e, t) {\n var n = new Set(e);\n return t.filter(r => {\n var i;\n return typeof r.name == \"string\" ? !n.has(r.name) : !(!((i = r.name) === null || i === void 0) && i.some(s => n.has(s)));\n });\n }\n function fe(_ref2) {\n var _this = this;\n var {\n delimiter: e = \",\",\n values: t = [],\n cache: n = !1,\n insertDelimiter: r = !1,\n allowRepeatedValues: i = !1\n } = _ref2;\n return {\n trigger: (s, l) => s.lastIndexOf(e) !== l.lastIndexOf(e),\n getQueryTerm: s => s.slice(s.lastIndexOf(e) + e.length),\n custom: function custom() {\n for (var _len2 = arguments.length, s = new Array(_len2), _key2 = 0; _key2 < _len2; _key2++) {\n s[_key2] = arguments[_key2];\n }\n return A(_this, void 0, void 0, function* () {\n var l;\n var c = yield q(t, r ? e : \"\", n, s);\n if (i) return c;\n var [a] = s,\n u = (l = a[a.length - 1]) === null || l === void 0 ? void 0 : l.split(e);\n return E(u, c);\n });\n }\n };\n }\n x.valueList = fe;\n function de(_ref3) {\n var _this2 = this;\n var {\n separator: e = \"=\",\n keys: t = [],\n values: n = [],\n cache: r = !1,\n insertSeparator: i = !0\n } = _ref3;\n return {\n trigger: (s, l) => s.indexOf(e) !== l.indexOf(e),\n getQueryTerm: s => s.slice(s.indexOf(e) + 1),\n custom: function custom() {\n for (var _len3 = arguments.length, s = new Array(_len3), _key3 = 0; _key3 < _len3; _key3++) {\n s[_key3] = arguments[_key3];\n }\n return A(_this2, void 0, void 0, function* () {\n var [l] = s,\n a = !l[l.length - 1].includes(e),\n u = a ? t : n,\n o = Q(a, r);\n return q(u, a && i ? 
e : \"\", o, s);\n });\n }\n };\n }\n x.keyValue = de;\n function he(_ref4) {\n var _this3 = this;\n var {\n separator: e = \"=\",\n delimiter: t = \",\",\n keys: n = [],\n values: r = [],\n cache: i = !1,\n insertSeparator: s = !0,\n insertDelimiter: l = !1,\n allowRepeatedKeys: c = !1,\n allowRepeatedValues: a = !0\n } = _ref4;\n return {\n trigger: (u, o) => {\n var f = M(u, e, t),\n p = M(o, e, t);\n return f !== p;\n },\n getQueryTerm: u => {\n var o = M(u, e, t);\n return u.slice(o + 1);\n },\n custom: function custom() {\n for (var _len4 = arguments.length, u = new Array(_len4), _key4 = 0; _key4 < _len4; _key4++) {\n u[_key4] = arguments[_key4];\n }\n return A(_this3, void 0, void 0, function* () {\n var [o] = u,\n f = o[o.length - 1],\n p = M(f, e, t),\n S = p === -1 || f.slice(p, p + e.length) !== e,\n h = S ? n : r,\n v = Q(S, i),\n d = yield q(h, S ? s ? e : \"\" : l ? t : \"\", v, u);\n if (S) {\n if (c) return d;\n var m = f.split(t).map(y => y.slice(0, y.indexOf(e)));\n return E(m, d);\n }\n if (a) return d;\n var O = f.split(t).map(m => m.slice(m.indexOf(e) + e.length));\n return E(O, d);\n });\n }\n };\n }\n x.keyValueList = he;\n});\nvar H = j(T => {\n \"use strict\";\n\n var ye = T && T.__awaiter || function (e, t, n, r) {\n function i(s) {\n return s instanceof n ? s : new n(function (l) {\n l(s);\n });\n }\n return new (n || (n = Promise))(function (s, l) {\n function c(o) {\n try {\n u(r.next(o));\n } catch (f) {\n l(f);\n }\n }\n function a(o) {\n try {\n u(r.throw(o));\n } catch (f) {\n l(f);\n }\n }\n function u(o) {\n o.done ? s(o.value) : i(o.value).then(c, a);\n }\n u((r = r.apply(e, t || [])).next());\n });\n };\n Object.defineProperty(T, \"__esModule\", {\n value: !0\n });\n T.ai = void 0;\n var ve = 4097,\n pe = 4,\n ge = .8,\n _e = ve * pe * ge;\n function Oe(_ref5) {\n var {\n name: e,\n prompt: t,\n message: n,\n postProcess: r,\n temperature: i,\n splitOn: s\n } = _ref5;\n return {\n scriptTimeout: 15e3,\n custom: (l, c, a) => ye(this, void 0, void 0, function* () {\n var u, o;\n var f = yield c({\n command: \"fig\",\n args: [\"settings\", \"--format\", \"json\", \"autocomplete.ai.enabled\"]\n });\n if (!JSON.parse(f.stdout)) return [];\n var p = typeof t == \"function\" ? yield t({\n tokens: l,\n executeCommand: c,\n generatorContext: a\n }) : t,\n S = typeof n == \"function\" ? yield n({\n tokens: l,\n executeCommand: c,\n generatorContext: a\n }) : n;\n if (S === null || S.length === 0) return console.warn(\"No message provided to AI generator\"), [];\n var h = _e - ((u = p === null || p === void 0 ? void 0 : p.length) !== null && u !== void 0 ? u : 0),\n v = {\n model: \"gpt-3.5-turbo\",\n source: \"autocomplete\",\n name: e,\n messages: [...(p ? [{\n role: \"system\",\n content: p\n }] : []), {\n role: \"user\",\n content: S.slice(0, h)\n }],\n temperature: i\n },\n b = JSON.stringify(v),\n d = yield c({\n command: \"fig\",\n args: [\"_\", \"request\", \"--route\", \"/ai/chat\", \"--method\", \"POST\", \"--body\", b]\n }),\n O = JSON.parse(d.stdout);\n return (o = O === null || O === void 0 ? void 0 : O.choices.map(y => {\n var w;\n return (w = y === null || y === void 0 ? void 0 : y.message) === null || w === void 0 ? void 0 : w.content;\n }).filter(y => typeof y == \"string\").flatMap(y => s ? 
y.split(s).filter(w => w.trim().length > 0) : [y]).map(y => {\n if (r) return r(y);\n var w = y.trim().replace(/\\n/g, \" \");\n return {\n icon: \"\\u{1FA84}\",\n name: w,\n insertValue: \"'\".concat(w, \"'\"),\n description: \"Generated by Fig AI\"\n };\n })) !== null && o !== void 0 ? o : [];\n })\n };\n }\n T.ai = Oe;\n});\nvar G = j(_ => {\n \"use strict\";\n\n var be = _ && _.__createBinding || (Object.create ? function (e, t, n, r) {\n r === void 0 && (r = n);\n var i = Object.getOwnPropertyDescriptor(t, n);\n (!i || (\"get\" in i ? !t.__esModule : i.writable || i.configurable)) && (i = {\n enumerable: !0,\n get: function get() {\n return t[n];\n }\n }), Object.defineProperty(e, r, i);\n } : function (e, t, n, r) {\n r === void 0 && (r = n), e[r] = t[n];\n }),\n me = _ && _.__exportStar || function (e, t) {\n for (var n in e) n !== \"default\" && !Object.prototype.hasOwnProperty.call(t, n) && be(t, e, n);\n };\n Object.defineProperty(_, \"__esModule\", {\n value: !0\n });\n _.ai = _.folders = _.filepaths = void 0;\n var B = $();\n Object.defineProperty(_, \"filepaths\", {\n enumerable: !0,\n get: function get() {\n return B.filepaths;\n }\n });\n Object.defineProperty(_, \"folders\", {\n enumerable: !0,\n get: function get() {\n return B.folders;\n }\n });\n me(k(), _);\n var Se = H();\n Object.defineProperty(_, \"ai\", {\n enumerable: !0,\n get: function get() {\n return Se.ai;\n }\n });\n});\nvar X = ie(G(), 1),\n we = {\n name: \"cd\",\n description: \"Change the shell working directory\",\n args: {\n generators: (0, X.filepaths)({\n showFolders: \"only\"\n }),\n filterStrategy: \"fuzzy\",\n suggestions: [{\n name: \"-\",\n description: \"Switch to the last used folder\",\n hidden: !0\n }, {\n name: \"~\",\n description: \"Switch to the home directory\",\n hidden: !0\n }]\n }\n },\n Fe = we;\n\n\n//# sourceURL=webpack://SuggestionsBundle/./node_modules/@withfig/autocomplete/build/cd.js?"); + +/***/ }), + +/***/ "./node_modules/@withfig/autocomplete/build/chmod.js": +/*!***********************************************************!*\ + !*** ./node_modules/@withfig/autocomplete/build/chmod.js ***! 
+ \***********************************************************/ +/***/ ((__unused_webpack___webpack_module__, __webpack_exports__, __webpack_require__) => { + +eval("__webpack_require__.r(__webpack_exports__);\n/* harmony export */ __webpack_require__.d(__webpack_exports__, {\n/* harmony export */ \"default\": () => (/* binding */ i)\n/* harmony export */ });\nvar e = {\n name: \"chmod\",\n description: \"Change file modes or Access Control Lists\",\n args: [{\n name: \"mode\",\n suggestions: [{\n name: \"u+x\",\n type: \"arg\",\n description: \"Give execute permission for the user\",\n icon: \"\\u{1F510}\"\n }, {\n name: \"a+rx\",\n type: \"arg\",\n description: \"Adds read and execute permissions for all classes\",\n icon: \"\\u{1F510}\"\n }, {\n name: \"744\",\n type: \"arg\",\n description: \"Sets read, write, and execute permissions for user, and sets read permission for Group and Others\",\n icon: \"\\u{1F510}\"\n }, {\n name: \"664\",\n type: \"arg\",\n description: \"Sets read and write permissions for user and Group, and provides read to Others\",\n icon: \"\\u{1F510}\"\n }, {\n name: \"777\",\n type: \"arg\",\n description: \"\\u26A0\\uFE0F allows all actions for all users\",\n icon: \"\\u{1F510}\"\n }]\n }, {\n template: \"filepaths\"\n }],\n options: [{\n name: \"-f\",\n description: \"Do not display a diagnostic message if chmod could not modify the mode for file, nor modify the exit status to reflect such failures\"\n }, {\n name: \"-H\",\n description: \"If the -R option is specified, symbolic links on the command line are followed and hence unaffected by the command. (Symbolic links encountered during tree traversal are not followed.)\"\n }, {\n name: \"-h\",\n description: \"If the file is a symbolic link, change the mode of the link itself rather than the file that the link points to\"\n }, {\n name: \"-L\",\n description: \"If the -R option is specified, all symbolic links are followed\"\n }, {\n name: \"-P\",\n description: \"If the -R option is specified, no symbolic links are followed. This is the default\"\n }, {\n name: \"-R\",\n description: \"Change the modes of the file hierarchies rooted in the files, instead of just the files themselves. Beware of unintentionally matching the ``..'' hard link to the parent directory when using wildcards like ``.*''\"\n }, {\n name: \"-v\",\n description: \"Cause chmod to be verbose, showing filenames as the mode is modified. If the -v flag is specified more than once, the old and new modes of the file will also be printed, in both octal and symbolic notation\"\n }]\n },\n i = e;\n\n\n//# sourceURL=webpack://SuggestionsBundle/./node_modules/@withfig/autocomplete/build/chmod.js?"); + +/***/ }), + +/***/ "./node_modules/@withfig/autocomplete/build/chown.js": +/*!***********************************************************!*\ + !*** ./node_modules/@withfig/autocomplete/build/chown.js ***! 
+ \***********************************************************/ +/***/ ((__unused_webpack___webpack_module__, __webpack_exports__, __webpack_require__) => { + +eval("__webpack_require__.r(__webpack_exports__);\n/* harmony export */ __webpack_require__.d(__webpack_exports__, {\n/* harmony export */ \"default\": () => (/* binding */ c),\n/* harmony export */ existingUsersandGroups: () => (/* binding */ t)\n/* harmony export */ });\nfunction asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } }\nfunction _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, \"next\", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, \"throw\", err); } _next(undefined); }); }; }\nvar t = {\n custom: function () {\n var _custom = _asyncToGenerator(function* (n, r) {\n var i = n.find(e => e.includes(\":\")),\n o = n.find(e => /^-.*n.*/.test(e)),\n s;\n if (i) {\n var {\n stdout: e\n } = yield r({\n command: \"bash\",\n args: [\"-c\", \"dscl . -list /Groups PrimaryGroupID | tr -s ' '| sort -r\"]\n });\n s = e;\n } else {\n var {\n stdout: _e\n } = yield r({\n command: \"bash\",\n args: [\"-c\", \"dscl . -list /Users UniqueID | tr -s ' '| sort -r\"]\n });\n s = _e;\n }\n return s.split(\"\\n\").map(e => e.split(\" \")).map(e => ({\n name: o ? e[1] : e[0],\n description: i ? \"Group - \".concat(o ? e[0] : \"gid: \".concat(e[1])) : \"User - \".concat(o ? e[0] : \"uid: \".concat(e[1])),\n icon: i ? 
\"\\u{1F465}\" : \"\\u{1F464}\",\n priority: 90\n }));\n });\n function custom(_x, _x2) {\n return _custom.apply(this, arguments);\n }\n return custom;\n }(),\n trigger: \":\",\n getQueryTerm: \":\"\n },\n a = {\n name: \"chown\",\n description: \"Change the user and/or group ownership of a given file, directory, or symbolic link\",\n args: [{\n name: \"owner[:group] or :group\",\n generators: t\n }, {\n name: \"file/directory\",\n isVariadic: !0,\n template: [\"filepaths\", \"folders\"]\n }],\n options: [{\n name: \"-f\",\n description: \"Don't report any failure to change file owner or group, nor modify the exit status to reflect such failures\"\n }, {\n name: \"-h\",\n description: \"If the file is a symbolic link, change the user ID and/or the group ID of the link itself\"\n }, {\n name: \"-n\",\n description: \"Interpret user ID and group ID as numeric, avoiding name lookups\"\n }, {\n name: \"-v\",\n description: \"Cause chown to be verbose, showing files as the owner is modified\"\n }, {\n name: \"-R\",\n description: \"Change the user ID and/or the group ID for the file hierarchies rooted in the files instead of just the files themselves\"\n }, {\n name: \"-H\",\n description: \"If the -R option is specified, symbolic links on the command line are followed\",\n exclusiveOn: [\"-L\", \"-P\"],\n dependsOn: [\"-R\"]\n }, {\n name: \"-L\",\n description: \"If the -R option is specified, all symbolic links are followed\",\n exclusiveOn: [\"-H\", \"-P\"],\n dependsOn: [\"-R\"]\n }, {\n name: \"-P\",\n description: \"If the -R option is specified, no symbolic links are followed\",\n exclusiveOn: [\"-H\", \"-L\"],\n dependsOn: [\"-R\"]\n }]\n },\n c = a;\n\n\n//# sourceURL=webpack://SuggestionsBundle/./node_modules/@withfig/autocomplete/build/chown.js?"); + +/***/ }), + +/***/ "./node_modules/@withfig/autocomplete/build/circleci.js": +/*!**************************************************************!*\ + !*** ./node_modules/@withfig/autocomplete/build/circleci.js ***! 
+ \**************************************************************/ +/***/ ((__unused_webpack___webpack_module__, __webpack_exports__, __webpack_require__) => { + +eval("__webpack_require__.r(__webpack_exports__);\n/* harmony export */ __webpack_require__.d(__webpack_exports__, {\n/* harmony export */ \"default\": () => (/* binding */ i)\n/* harmony export */ });\nvar e = {\n name: \"circleci\",\n description: \"CircleCI CLI\",\n subcommands: [{\n name: \"completion\",\n description: \"Generate shell completion scripts\",\n subcommands: [{\n name: \"bash\",\n description: \"Generate bash completion scripts\"\n }, {\n name: \"zsh\",\n description: \"Generate zsh completion scripts\"\n }]\n }, {\n name: \"config\",\n description: \"Operate on build config files\",\n subcommands: [{\n name: \"pack\",\n description: \"Pack CircleCI config files into a single file\"\n }, {\n name: \"process\",\n description: \"Validate and display extended config\"\n }, {\n name: \"validate\",\n description: \"Checks that config is valid\"\n }]\n }, {\n name: \"context\",\n description: \"Secure and share environment variables across projects\",\n subcommands: [{\n name: \"create\",\n description: \"Create a new context\",\n args: [{\n name: \"vcs-type\",\n description: \"Your VCS provider, can be either 'github' or 'bitbucket'\",\n suggestions: [\"github\", \"bitbucket\"],\n isOptional: !0\n }, {\n name: \"org-name\",\n description: \"The name of your organization\",\n isOptional: !0\n }, {\n name: \"context-name\",\n description: \"The name for your context\"\n }]\n }, {\n name: \"delete\",\n description: \"Delete the named context\",\n args: [{\n name: \"vcs-type\",\n description: \"Your VCS provider, can be either 'github' or 'bitbucket'\",\n suggestions: [\"github\", \"bitbucket\"]\n }, {\n name: \"org-name\",\n description: \"The name of your organization\"\n }, {\n name: \"context-name\",\n description: \"The name for your context\"\n }]\n }, {\n name: \"list\",\n description: \"List all contexts\",\n args: [{\n name: \"vcs-type\",\n description: \"Your VCS provider, can be either 'github' or 'bitbucket'\",\n suggestions: [\"github\", \"bitbucket\"]\n }, {\n name: \"org-name\",\n description: \"The name of your organization\"\n }]\n }, {\n name: \"remove-secret\",\n description: \"Remove environment variable from a context\",\n args: [{\n name: \"vcs-type\",\n description: \"Your VCS provider, can be either 'github' or 'bitbucket'\",\n suggestions: [\"github\", \"bitbucket\"]\n }, {\n name: \"org-name\",\n description: \"The name of your organization\"\n }, {\n name: \"context-name\",\n description: \"The name for your context\"\n }, {\n name: \"secret name\",\n description: \"The name of the env variable to remove\"\n }]\n }, {\n name: \"show\",\n description: \"Show a context\",\n args: [{\n name: \"vcs-type\",\n description: \"Your VCS provider, can be either 'github' or 'bitbucket'\",\n suggestions: [\"github\", \"bitbucket\"]\n }, {\n name: \"org-name\",\n description: \"The name of your organization\"\n }, {\n name: \"context-name\",\n description: \"The name for your context\"\n }]\n }, {\n name: \"store-secret\",\n description: \"Store environment variables\",\n args: [{\n name: \"vcs-type\",\n description: \"Your VCS provider, can be either 'github' or 'bitbucket'\",\n suggestions: [\"github\", \"bitbucket\"]\n }, {\n name: \"org-name\",\n description: \"The name of your organization\"\n }, {\n name: \"context-name\",\n description: \"The name for your context\"\n }, {\n name: \"secret name\",\n 
description: \"The name of the env variable to store\"\n }]\n }]\n }, {\n name: \"diagnostic\",\n description: \"Check the status of your CircleCI CLI\"\n }, {\n name: \"follow\",\n description: \"Attempt to follow the project for the current git repo\"\n }, {\n name: \"help\",\n description: \"Help about any command\"\n }, {\n name: \"local\",\n description: \"Debug jobs on the local machine\",\n subcommands: [{\n name: \"execute\",\n description: \"Run a job in a container on the local machine\"\n }]\n }, {\n name: \"namespace\",\n description: \"Operate on namespaces\",\n subcommands: [{\n name: \"create\",\n description: \"Create a namespace\"\n }]\n }, {\n name: \"open\",\n description: \"Open the current project in the browser\"\n }, {\n name: \"orb\",\n description: \"Operate on orbs\",\n subcommands: [{\n name: \"add-to-category\",\n description: \"Add an orb to a category\",\n args: [{\n name: \"namespace/orb\",\n description: \"The namespace and orb to add to a category\"\n }, {\n name: \"category name\",\n description: \"The name of the category to add the orb to, in quotes\"\n }]\n }, {\n name: \"create\",\n description: \"Create an orb in a namespace\",\n args: {\n name: \"namespace/orb\",\n description: \"Create an orb in the specified namespace\"\n },\n options: [{\n name: \"--private\",\n description: \"Specify that this orb is for private use within your org, unlisted from the public registry\"\n }]\n }, {\n name: \"info\",\n description: \"Show metadata of an orb\",\n args: {\n name: \"orb\",\n description: \"The namespace and orb to show metadata for\"\n }\n }, {\n name: \"init\",\n description: \"Initialize a new orb\",\n args: {\n name: \"path\",\n description: \"The /path/to/myProject-orb\"\n },\n options: [{\n name: \"--private\",\n description: \"Specify that this orb is for private use within your org, unlisted from the public registry\"\n }]\n }, {\n name: \"list\",\n description: \"List orbs\",\n args: {\n name: \"namespace\",\n description: \"The namespace used for the orb (i.e. circleci)\"\n },\n options: [{\n name: \"--private\",\n description: \"Specify that this orb is for private use within your org, unlisted from the public registry\"\n }, {\n name: \"--sort\",\n description: \"Specify the sorting\",\n args: {\n suggestions: [\"builds\", \"projects\", \"orgs\"]\n }\n }, {\n name: [\"-u\", \"--uncertified\"],\n description: \"Include uncertified orbs\"\n }]\n }, {\n name: \"list-categories\",\n description: \"List orb categories\"\n }, {\n name: \"pack\",\n description: \"Pack an orb with local scripts\",\n args: {\n name: \"path\",\n description: \"The /path/to/myProject-orb\"\n }\n }, {\n name: \"process\",\n description: \"Validate an orb and print its form after all pre-registration processing is complete\",\n args: {\n name: \"path\",\n description: \"The path to your orb (use '-' for STDIN)\"\n }\n }, {\n name: \"publish\",\n description: \"Publish an orb to the registry\",\n args: [{\n name: \"path\",\n description: \"The /path/to/myProject-orb\"\n }, {\n name: \"orb\",\n description: \"A fully-qualified reference to an orb, i.e. 
namespace/orb@version\"\n }]\n }, {\n name: \"remove-from-category\",\n description: \"Remove an orb from a category\",\n args: [{\n name: \"namespace/orb\",\n description: \"The namespace and orb to add to a category\"\n }, {\n name: \"category name\",\n description: \"The name of the category to add the orb to, in quotes\"\n }]\n }, {\n name: \"source\",\n description: \"Show source code of an orb\",\n args: {\n name: \"orb\",\n description: \"A fully-qualified reference to an orb, i.e. namespace/orb@version\"\n }\n }, {\n name: \"unlist\",\n description: \"Disable/enable an orb's listing in the registry\",\n args: [{\n name: \"namespace/orb\",\n description: \"The namespace and orb to unlist/list from the registry\"\n }, {\n name: \"condition\",\n description: \"Use either true|false\",\n suggestions: [\"true\", \"false\"]\n }]\n }, {\n name: \"validate\",\n description: \"Validate an orb.yml\",\n args: {\n name: \"path\",\n description: \"The /path/to/myProject-orb\"\n }\n }]\n }, {\n name: \"policy\",\n description: \"Manage security policies\",\n subcommands: [{\n name: \"decide\",\n description: \"Make a decision\",\n args: {\n name: \"path\",\n description: \"Policy file or directory path\"\n },\n options: [{\n name: \"--input\",\n description: \"Path to input file, i.e. ./.circleci/config.yml\",\n args: {\n name: \"string\"\n }\n }, {\n name: \"--metafile\",\n description: \"Path to decision metadata file\",\n args: {\n name: \"string\"\n }\n }, {\n name: \"--owner-id\",\n description: \"The id of the policy's owner\",\n args: {\n name: \"string\"\n }\n }, {\n name: \"--strict\",\n description: \"Return non-zero status code for decision resulting in HARD_FAIL\"\n }, {\n name: \"--context\",\n description: \"Policy context for decision, default is 'config'\",\n args: {\n name: \"string\"\n }\n }]\n }, {\n name: \"diff\",\n description: \"Get diff between local and remote policy bundles\",\n args: {\n name: \"policy_dir_path\",\n description: \"Policy file or directory path\"\n },\n options: [{\n name: \"--context\",\n description: \"Policy context for decision, default is 'config'\",\n args: {\n name: \"string\"\n }\n }, {\n name: \"--owner-id\",\n description: \"The id of the policy's owner\",\n args: {\n name: \"string\"\n }\n }]\n }, {\n name: \"eval\",\n description: \"Perform raw opa evaluation locally\",\n args: {\n name: \"policy_dir_path\",\n description: \"Policy file or directory path\"\n },\n options: [{\n name: \"--input\",\n description: \"Path to input file, i.e. 
./.circleci/config.yml\",\n args: {\n name: \"string\"\n }\n }, {\n name: \"--metafile\",\n description: \"Path to decision metadata file\",\n args: {\n name: \"string\"\n }\n }, {\n name: \"--query\",\n description: \"Policy decision query, default is 'data'\",\n args: {\n name: \"string\"\n }\n }]\n }, {\n name: \"fetch\",\n description: \"Fetch policy bundle (or a single policy)\",\n args: {\n name: \"policy name\",\n description: \"Name of policy to fetch\"\n },\n options: [{\n name: \"--context\",\n description: \"Policy context for decision, default is 'config'\",\n args: {\n name: \"string\"\n }\n }, {\n name: \"--owner-id\",\n description: \"The id of the policy's owner\"\n }]\n }, {\n name: \"logs\",\n description: \"Get policy decision logs / decision log (or policy bundle) by decision ID\",\n args: {\n name: \"decision ID\",\n description: \"Decision ID to get logs for\"\n },\n options: [{\n name: \"--after\",\n description: \"Filter decision logs triggered AFTER this datetime\",\n args: {\n name: \"string\"\n }\n }, {\n name: \"--before\",\n description: \"Filter decision logs triggered BEFORE this datetime\",\n args: {\n name: \"string\"\n }\n }, {\n name: \"--branch\",\n description: \"Filter decision logs based on branch name\",\n args: {\n name: \"string\"\n }\n }, {\n name: \"--context\",\n description: \"Policy context for decision, default is 'config'\",\n args: {\n name: \"string\"\n }\n }, {\n name: \"--owner-id\",\n description: \"The id of the policy's owner\",\n args: {\n name: \"string\"\n }\n }, {\n name: \"--out\",\n description: \"Specify output file name\",\n args: {\n name: \"string\"\n }\n }, {\n name: \"--policy-bundle\",\n description: \"Get only the policy bundle for given decisionID\"\n }, {\n name: \"--project-id\",\n description: \"Filter decision logs based on project-id\",\n args: {\n name: \"string\"\n }\n }, {\n name: \"--status\",\n description: \"Filter decision logs based on their status\",\n args: {\n name: \"string\"\n }\n }]\n }, {\n name: \"push\",\n description: \"Push policy bundle (or a single policy)\",\n args: {\n name: \"policy_dir_path\",\n description: \"Policy file or directory path\"\n },\n options: [{\n name: \"--context\",\n description: \"Policy context for decision, default is 'config'\",\n args: {\n name: \"string\"\n }\n }, {\n name: \"--owner-id\",\n description: \"The id of the policy's owner\",\n args: {\n name: \"string\"\n }\n }, {\n name: \"--no-prompt\",\n description: \"Removes the prompt\"\n }]\n }, {\n name: \"settings\",\n description: \"Get/set policy decision settings (To read settings: run command without any settings flags)\",\n options: [{\n name: \"--context\",\n description: \"Policy context for decision, default is 'config'\"\n }, {\n name: \"--enabled\",\n requiresSeparator: !0,\n description: \"Enable/disable policy decision evaluation in build pipeline\",\n args: {\n name: \"boolean\",\n isOptional: !0,\n suggestions: [\"true\", \"false\"]\n }\n }, {\n name: \"--owner-id\",\n description: \"The id of the policy's owner\",\n args: {\n name: \"string\"\n }\n }]\n }]\n }, {\n name: \"runner\",\n description: \"Operate on runners\",\n subcommands: [{\n name: \"instance\",\n description: \"Operate on runner instances\"\n }, {\n name: \"resource-class\",\n description: \"Operate on runner resource-classes\"\n }, {\n name: \"token\",\n description: \"Operate on runner tokens\"\n }]\n }, {\n name: \"setup\",\n description: \"Setup CLI with your credentials\"\n }, {\n name: \"update\",\n description: \"Update and 
switch to new CLI version\",\n subcommands: [{\n name: \"check\",\n description: \"Check for new CLI version\"\n }, {\n name: \"install\",\n description: \"Install new CLI version\"\n }]\n }, {\n name: \"version\",\n description: \"Display CircleCI CLI version\"\n }],\n options: [{\n name: [\"--help\", \"-h\"],\n description: \"Show help for CircleCI\",\n isPersistent: !0\n }, {\n name: \"--skip-update-check\",\n description: \"Skip update check before every command\",\n isPersistent: !0\n }]\n },\n i = e;\n\n\n//# sourceURL=webpack://SuggestionsBundle/./node_modules/@withfig/autocomplete/build/circleci.js?"); + +/***/ }), + +/***/ "./node_modules/@withfig/autocomplete/build/clang.js": +/*!***********************************************************!*\ + !*** ./node_modules/@withfig/autocomplete/build/clang.js ***! + \***********************************************************/ +/***/ ((__unused_webpack___webpack_module__, __webpack_exports__, __webpack_require__) => { + +eval("__webpack_require__.r(__webpack_exports__);\n/* harmony export */ __webpack_require__.d(__webpack_exports__, {\n/* harmony export */ clangBase: () => (/* binding */ e),\n/* harmony export */ \"default\": () => (/* binding */ c),\n/* harmony export */ stdCPPSuggestions: () => (/* binding */ n),\n/* harmony export */ stdCSuggestions: () => (/* binding */ i),\n/* harmony export */ stdHLSLSuggestions: () => (/* binding */ r),\n/* harmony export */ stdOpenCLCPPSuggestions: () => (/* binding */ a),\n/* harmony export */ stdOpenCLSuggestions: () => (/* binding */ t),\n/* harmony export */ stdOption: () => (/* binding */ o)\n/* harmony export */ });\nfunction ownKeys(e, r) { var t = Object.keys(e); if (Object.getOwnPropertySymbols) { var o = Object.getOwnPropertySymbols(e); r && (o = o.filter(function (r) { return Object.getOwnPropertyDescriptor(e, r).enumerable; })), t.push.apply(t, o); } return t; }\nfunction _objectSpread(e) { for (var r = 1; r < arguments.length; r++) { var t = null != arguments[r] ? arguments[r] : {}; r % 2 ? ownKeys(Object(t), !0).forEach(function (r) { _defineProperty(e, r, t[r]); }) : Object.getOwnPropertyDescriptors ? Object.defineProperties(e, Object.getOwnPropertyDescriptors(t)) : ownKeys(Object(t)).forEach(function (r) { Object.defineProperty(e, r, Object.getOwnPropertyDescriptor(t, r)); }); } return e; }\nfunction _defineProperty(obj, key, value) { key = _toPropertyKey(key); if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; }\nfunction _toPropertyKey(t) { var i = _toPrimitive(t, \"string\"); return \"symbol\" == typeof i ? i : String(i); }\nfunction _toPrimitive(t, r) { if (\"object\" != typeof t || !t) return t; var e = t[Symbol.toPrimitive]; if (void 0 !== e) { var i = e.call(t, r || \"default\"); if (\"object\" != typeof i) return i; throw new TypeError(\"@@toPrimitive must return a primitive value.\"); } return (\"string\" === r ? 
String : Number)(t); }\nvar i = [{\n name: [\"c89\", \"c90\", \"iso9899:1990\"],\n description: \"ISO C 1990\"\n }, {\n name: \"iso9899:199409\",\n description: \"ISO C 1990 with amendment 1\"\n }, {\n name: [\"gnu89\", \"gnu90\"],\n description: \"ISO C 1990 with GNU extensions\"\n }, {\n name: [\"c99\", \"iso9899:1999\"],\n description: \"ISO C 1999\"\n }, {\n name: \"gnu99\",\n description: \"ISO C 1999 with GNU extensions\"\n }, {\n name: [\"c11\", \"iso9899:2011\"],\n description: \"ISO C 2011\"\n }, {\n name: \"gnu11\",\n description: \"ISO C 2011 with GNU extensions\"\n }, {\n name: [\"c17\", \"iso9899:2017\", \"c18\", \"iso9899:2018\"],\n description: \"ISO C 2017\"\n }, {\n name: [\"gnu17\", \"gnu18\"],\n description: \"ISO C 2017 with GNU extensions\"\n }, {\n name: \"c2x\",\n description: \"Working Draft for ISO C2x\"\n }, {\n name: \"gnu2x\",\n description: \"Working Draft for ISO C2x with GNU extensions\"\n }],\n n = [{\n name: [\"c++98\", \"c++03\"],\n description: \"ISO C++ 1998 with amendments\"\n }, {\n name: [\"gnu++98\", \"gnu++03\"],\n description: \"ISO C++ 1998 with amendments and GNU extensions\"\n }, {\n name: \"c++11\",\n description: \"ISO C++ 2011 with amendments\"\n }, {\n name: \"gnu++11\",\n description: \"ISO C++ 2011 with amendments and GNU extensions\"\n }, {\n name: \"c++14\",\n description: \"ISO C++ 2014 with amendments\"\n }, {\n name: \"gnu++14\",\n description: \"ISO C++ 2014 with amendments and GNU extensions\"\n }, {\n name: \"c++17\",\n description: \"ISO C++ 2017 with amendments\"\n }, {\n name: \"gnu++17\",\n description: \"ISO C++ 2017 with amendments and GNU extensions\"\n }, {\n name: \"c++20\",\n description: \"ISO C++ 2020 DIS\"\n }, {\n name: \"gnu++20\",\n description: \"ISO C++ 2020 DIS with GNU extensions\"\n }, {\n name: \"c++2b\",\n description: \"Working draft for ISO C++ 2023 DIS\"\n }, {\n name: \"gnu++2b\",\n description: \"Working draft for ISO C++ 2023 DIS with GNU extensions\"\n }],\n t = [{\n name: \"cl1.0\",\n description: \"OpenCL 1.0\"\n }, {\n name: \"cl1.1\",\n description: \"OpenCL 1.1\"\n }, {\n name: \"cl1.2\",\n description: \"OpenCL 1.2\"\n }, {\n name: \"cl2.0\",\n description: \"OpenCL 2.0\"\n }, {\n name: \"cl3.0\",\n description: \"OpenCL 3.0\"\n }],\n a = [{\n name: [\"clc++\", \"clc++1.0\"],\n description: \"C++ for OpenCL 1.0\"\n }, {\n name: \"clc++2021\",\n description: \"C++ for OpenCL 2021\"\n }],\n r = [{\n name: \"hlsl\",\n description: \"High Level Shader Language\"\n }, {\n name: \"hlsl2015\",\n description: \"High Level Shader Language 2015\"\n }, {\n name: \"hlsl2016\",\n description: \"High Level Shader Language 2016\"\n }, {\n name: \"hlsl2017\",\n description: \"High Level Shader Language 2017\"\n }, {\n name: \"hlsl2018\",\n description: \"High Level Shader Language 2018\"\n }, {\n name: \"hlsl2021\",\n description: \"High Level Shader Language 2021\"\n }, {\n name: \"hlsl202x\",\n description: \"High Level Shader Language 202x\"\n }],\n o = {\n name: \"-std\",\n description: \"Language standard to compile for\",\n args: {\n name: \"value\",\n suggestions: [...i, ...n, ...t, ...a, {\n name: \"cuda\",\n description: \"NVIDIA CUDA(tm)\"\n }, {\n name: \"hip\",\n description: \"HIP\"\n }, ...r]\n },\n requiresSeparator: !0\n },\n e = {\n name: \"clang\",\n description: \"Clang LLVM compiler\",\n args: {\n name: \"file\",\n isVariadic: !0,\n template: \"filepaths\"\n },\n parserDirectives: {\n flagsArePosixNoncompliant: !0\n },\n options: [{\n name: \"-###\",\n description: \"Print (but do not run) 
the commands to run for this compilation\"\n }, {\n name: \"--analyzer-output\",\n description: \"Static analyzer report output format\",\n args: {\n name: \"value\",\n suggestions: [\"html\", \"plist\", \"plist-multi-file\", \"plist-html\", \"sarif\", \"text\"]\n }\n }, {\n name: \"--analyze\",\n description: \"Run the static analyzer\"\n }, {\n name: \"-arcmt-migrate-emit-errors\",\n description: \"Emit ARC errors even if the migrator can fix them\"\n }, {\n name: \"-arcmt-migrate-report-output\",\n description: \"Output path for the plist report\",\n args: {\n name: \"value\"\n }\n }, {\n name: \"-B\",\n description: \"Add to search path for binaries and object files used implicitly\",\n args: {\n name: \"dir\",\n template: \"folders\"\n }\n }, {\n name: \"-CC\",\n description: \"Include comments from within macros in preprocessed output\"\n }, {\n name: \"-cl-denorms-are-zero\",\n description: \"OpenCL only. Allow denormals to be flushed to zero\"\n }, {\n name: \"-cl-fast-relaxed-math\",\n description: \"OpenCL only. Sets -cl-finite-math-only and -cl-unsafe-math-optimizations, and defines __FAST_RELAXED_MATH__\"\n }, {\n name: \"-cl-finite-math-only\",\n description: \"OpenCL only. Allow floating-point optimizations that assume arguments and results are not NaNs or +-Inf\"\n }, {\n name: \"-cl-fp32-correctly-rounded-divide-sqrt\",\n description: \"OpenCL only. Specify that single precision floating-point divide and sqrt used in the program source are correctly rounded\"\n }, {\n name: \"-cl-kernel-arg-info\",\n description: \"OpenCL only. Generate kernel argument metadata\"\n }, {\n name: \"-cl-mad-enable\",\n description: \"OpenCL only. Allow use of less precise MAD computations in the generated binary\"\n }, {\n name: \"-cl-no-signed-zeros\",\n description: \"OpenCL only. Allow use of less precise no signed zeros computations in the generated binary\"\n }, {\n name: \"-cl-opt-disable\",\n description: \"OpenCL only. This option disables all optimizations. By default optimizations are enabled\"\n }, {\n name: \"-cl-single-precision-constant\",\n description: \"OpenCL only. Treat double precision floating-point constant as single precision constant\"\n }, {\n name: \"-cl-std\",\n description: \"OpenCL language standard to compile for\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-cl-strict-aliasing\",\n description: \"OpenCL only. This option is added for compatibility with OpenCL 1.0\"\n }, {\n name: \"-cl-uniform-work-group-size\",\n description: \"OpenCL only. Defines that the global work-size be a multiple of the work-group size specified to clEnqueueNDRangeKernel\"\n }, {\n name: \"-cl-unsafe-math-optimizations\",\n description: \"OpenCL only. Allow unsafe floating-point optimizations. Also implies -cl-no-signed-zeros and -cl-mad-enable\"\n }, {\n name: \"--config\",\n description: \"Specifies configuration file\",\n args: {\n name: \"value\"\n }\n }, {\n name: \"--cuda-compile-host-device\",\n description: \"Compile CUDA code for both host and device (default). Has no effect on non-CUDA compilations\"\n }, {\n name: \"--cuda-device-only\",\n description: \"Compile CUDA code for device only\"\n }, {\n name: \"--cuda-host-only\",\n description: \"Compile CUDA code for host only. Has no effect on non-CUDA compilations\"\n }, {\n name: \"--cuda-include-ptx\",\n description: \"Include PTX for the following GPU architecture (e.g. sm_35) or 'all'. 
May be specified more than once\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"--cuda-noopt-device-debug\",\n description: \"Enable device-side debug info generation. Disables ptxas optimizations\"\n }, {\n name: \"--cuda-path-ignore-env\",\n description: \"Ignore environment variables to detect CUDA installation\"\n }, {\n name: \"--cuda-path\",\n description: \"CUDA installation path\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-cxx-isystem\",\n description: \"Add directory to the C++ SYSTEM include search path\",\n args: {\n name: \"directory\",\n template: \"folders\"\n }\n }, {\n name: \"-C\",\n description: \"Include comments in preprocessed output\"\n }, {\n name: \"-c\",\n description: \"Only run preprocess, compile, and assemble steps\"\n }, {\n name: \"-dD\",\n description: \"Print macro definitions in -E mode in addition to normal output\"\n }, {\n name: \"-dependency-dot\",\n description: \"Filename to write DOT-formatted header dependencies to\",\n args: {\n name: \"value\"\n }\n }, {\n name: \"-dependency-file\",\n description: \"Filename (or -) to write dependency output to\",\n args: {\n name: \"value\"\n }\n }, {\n name: \"-dI\",\n description: \"Print include directives in -E mode in addition to normal output\"\n }, {\n name: \"-dM\",\n description: \"Print macro definitions in -E mode instead of normal output\"\n }, {\n name: \"-dsym-dir\",\n description: \"Directory to output dSYM's (if any) to\",\n args: {\n name: \"dir\",\n template: \"folders\"\n }\n }, {\n name: \"-D\",\n description: \"= Define to (or 1 if omitted)\",\n args: {\n name: \"macro\"\n }\n }, {\n name: \"-emit-ast\",\n description: \"Emit Clang AST files for source inputs\"\n }, {\n name: \"-emit-interface-stubs\",\n description: \"Generate Interface Stub Files\"\n }, {\n name: \"-emit-llvm\",\n description: \"Use the LLVM representation for assembler and object files\"\n }, {\n name: \"-emit-merged-ifs\",\n description: \"Generate Interface Stub Files, emit merged text not binary\"\n }, {\n name: \"--emit-static-lib\",\n description: \"Enable linker job to emit a static library\"\n }, {\n name: \"-enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang\",\n description: \"Trivial automatic variable initialization to zero is only here for benchmarks, it'll eventually be removed, and I'm OK with that because I'm only using it to benchmark\"\n }, {\n name: \"-E\",\n description: \"Only run the preprocessor\"\n }, {\n name: \"-faapcs-bitfield-load\",\n description: \"Follows the AAPCS standard that all volatile bit-field write generates at least one load. (ARM only)\"\n }, {\n name: \"-faapcs-bitfield-width\",\n description: \"Follow the AAPCS standard requirement stating that volatile bit-field width is dictated by the field container type. 
(ARM only)\"\n }, {\n name: \"-faccess-control\"\n }, {\n name: \"-faddrsig\",\n description: \"Emit an address-significance table\"\n }, {\n name: \"-faligned-allocation\",\n description: \"Enable C++17 aligned allocation functions\"\n }, {\n name: \"-fallow-editor-placeholders\",\n description: \"Treat editor placeholders as valid source code\"\n }, {\n name: \"-fansi-escape-codes\",\n description: \"Use ANSI escape codes for diagnostics\"\n }, {\n name: \"-fapinotes-cache-path\",\n description: \"Does nothing; API notes are no longer cached separately from modules\",\n args: {\n name: \"directory\",\n template: \"folders\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fapinotes-modules\",\n description: \"Enable module-based external API notes support\"\n }, {\n name: \"-fapinotes-swift-version\",\n description: \"Specify the Swift version to use when filtering API notes\",\n args: {\n name: \"version\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fapinotes\",\n description: \"Enable external API notes support\"\n }, {\n name: \"-fapple-kext\",\n description: \"Use Apple's kernel extensions ABI\"\n }, {\n name: \"-fapple-link-rtlib\",\n description: \"Force linking the clang builtins runtime library\"\n }, {\n name: \"-fapple-pragma-pack\",\n description: \"Enable Apple gcc-compatible #pragma pack handling\"\n }, {\n name: \"-fapplication-extension\",\n description: \"Restrict code to those available for App Extensions\"\n }, {\n name: \"-fasm-blocks\"\n }, {\n name: \"-fautolink\"\n }, {\n name: \"-fbasic-block-sections\",\n description: \"Place each function's basic blocks in unique sections (ELF Only) : all | labels | none | list=\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fblocks\",\n description: \"Enable the 'blocks' language feature\"\n }, {\n name: \"-fborland-extensions\",\n description: \"Accept non-standard constructs supported by the Borland compiler\"\n }, {\n name: \"-fbranch-target-identification\",\n description: \"Emit branch target identification instructions for indirect branch destinations\"\n }, {\n name: \"-fbuild-session-file\",\n description: \"Use the last modification time of as the build session timestamp\",\n args: {\n name: \"file\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fbuild-session-timestamp\",\n description: \"Time when the current build session started\",\n args: {\n name: \"timesinceEpochinseconds\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fbuiltin-module-map\",\n description: \"Load the clang builtins module map file\"\n }, {\n name: \"-fc++-static-destructors\"\n }, {\n name: \"-fcall-saved-x10\",\n description: \"Make the x10 register call-saved (AArch64 only)\"\n }, {\n name: \"-fcall-saved-x11\",\n description: \"Make the x11 register call-saved (AArch64 only)\"\n }, {\n name: \"-fcall-saved-x12\",\n description: \"Make the x12 register call-saved (AArch64 only)\"\n }, {\n name: \"-fcall-saved-x13\",\n description: \"Make the x13 register call-saved (AArch64 only)\"\n }, {\n name: \"-fcall-saved-x14\",\n description: \"Make the x14 register call-saved (AArch64 only)\"\n }, {\n name: \"-fcall-saved-x15\",\n description: \"Make the x15 register call-saved (AArch64 only)\"\n }, {\n name: \"-fcall-saved-x18\",\n description: \"Make the x18 register call-saved (AArch64 only)\"\n }, {\n name: \"-fcall-saved-x8\",\n description: \"Make the x8 register call-saved (AArch64 only)\"\n }, {\n name: \"-fcall-saved-x9\",\n description: \"Make the x9 register call-saved (AArch64 only)\"\n }, {\n name: 
\"-fcaret-diagnostics\"\n }, {\n name: \"-fcf-protection\",\n description: \"Instrument control-flow architecture protection\",\n args: {\n name: \"value\",\n isOptional: !0,\n suggestions: [\"return\", \"branch\", \"full\", \"none\"],\n default: \"full\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fchar8_t\",\n description: \"Enable C++ builtin type char8_t\"\n }, {\n name: \"-fclang-abi-compat\",\n description: \"Attempt to match the ABI of Clang \",\n args: {\n name: \"version\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fcolor-diagnostics\",\n description: \"Enable colors in diagnostics\"\n }, {\n name: \"-fcomment-block-commands\",\n description: \"Treat each comma separated argument in as a documentation comment block command\",\n args: {\n name: \"arg\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fcommon\",\n description: \"Place uninitialized global variables in a common block\"\n }, {\n name: \"-fcomplete-member-pointers\",\n description: \"Require member pointer base types to be complete if they would be significant under the Microsoft ABI\"\n }, {\n name: \"-fconstant-cfstrings\"\n }, {\n name: \"-fconvergent-functions\",\n description: \"Assume functions may be convergent\"\n }, {\n name: \"-fcoroutines-ts\",\n description: \"Enable support for the C++ Coroutines TS\"\n }, {\n name: \"-fcoverage-mapping\",\n description: \"Generate coverage mapping to enable code coverage analysis\"\n }, {\n name: \"-fcs-profile-generate\",\n description: \"Generate instrumented code to collect context sensitive execution counts into /default.profraw (overridden by LLVM_PROFILE_FILE env var)\",\n args: {\n name: \"directory\",\n template: \"folders\",\n isOptional: !0\n },\n requiresSeparator: !0\n }, {\n name: \"-fcuda-approx-transcendentals\",\n description: \"Use approximate transcendental functions\"\n }, {\n name: \"-fcuda-flush-denormals-to-zero\",\n description: \"Flush denormal floating point values to zero in CUDA device mode\"\n }, {\n name: \"-fcuda-short-ptr\",\n description: \"Use 32-bit pointers for accessing const/local/shared address spaces\"\n }, {\n name: \"-fcxx-exceptions\",\n description: \"Enable C++ exceptions\"\n }, {\n name: \"-fdata-sections\",\n description: \"Place each data in its own section\"\n }, {\n name: \"-fdebug-compilation-dir\",\n description: \"The compilation directory to embed in the debug info\",\n args: {\n name: \"value\"\n }\n }, {\n name: \"-fdebug-default-version\",\n description: \"Default DWARF version to use, if a -g option caused DWARF debug info to be produced\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fdebug-info-for-profiling\",\n description: \"Emit extra debug info to make sample profile more accurate\"\n }, {\n name: \"-fdebug-macro\",\n description: \"Emit macro debug information\"\n }, {\n name: \"-fdebug-prefix-map\",\n description: \"Remap file source paths in debug info\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fdebug-ranges-base-address\",\n description: \"Use DWARF base address selection entries in .debug_ranges\"\n }, {\n name: \"-fdebug-types-section\",\n description: \"Place debug types in their own section (ELF Only)\"\n }, {\n name: \"-fdeclspec\",\n description: \"Allow __declspec as a keyword\"\n }, {\n name: \"-fdelayed-template-parsing\",\n description: \"Parse templated function definitions at the end of the translation unit\"\n }, {\n name: \"-fdelete-null-pointer-checks\",\n description: \"Treat usage of null pointers as undefined behavior 
(default)\"\n }, {\n name: \"-fdiagnostics-absolute-paths\",\n description: \"Print absolute paths in diagnostics\"\n }, {\n name: \"-fdiagnostics-hotness-threshold\",\n description: \"Prevent optimization remarks from being output if they do not have at least this profile count. Use 'auto' to apply the threshold from profile summary\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fdiagnostics-parseable-fixits\",\n description: \"Print fix-its in machine parseable form\"\n }, {\n name: \"-fdiagnostics-print-source-range-info\",\n description: \"Print source range spans in numeric form\"\n }, {\n name: \"-fdiagnostics-show-hotness\",\n description: \"Enable profile hotness information in diagnostic line\"\n }, {\n name: \"-fdiagnostics-show-note-include-stack\",\n description: \"Display include stacks for diagnostic notes\"\n }, {\n name: \"-fdiagnostics-show-option\",\n description: \"Print option name with mappable diagnostics\"\n }, {\n name: \"-fdiagnostics-show-template-tree\",\n description: \"Print a template comparison tree for differing templates\"\n }, {\n name: \"-fdigraphs\",\n description: \"Enable alternative token representations '<:', ':>', '<%', '%>', '%:', '%:%:' (default)\"\n }, {\n name: \"-fdiscard-value-names\",\n description: \"Discard value names in LLVM IR\"\n }, {\n name: \"-fdollars-in-identifiers\",\n description: \"Allow '$' in identifiers\"\n }, {\n name: \"-fdouble-square-bracket-attributes\",\n description: \"Enable '[[]]' attributes in all C and C++ language modes\"\n }, {\n name: \"-fdwarf-directory-asm\"\n }, {\n name: \"-fdwarf-exceptions\",\n description: \"Use DWARF style exceptions\"\n }, {\n name: \"-felide-constructors\"\n }, {\n name: \"-feliminate-unused-debug-types\",\n description: \"Do not emit debug info for defined but unused types\"\n }, {\n name: \"-fembed-bitcode-marker\",\n description: \"Embed placeholder LLVM IR data as a marker\"\n }, {\n name: \"-fembed-bitcode\",\n description: \"Embed LLVM bitcode\",\n args: {\n name: \"option\",\n suggestions: [\"off\", \"all\", \"bitcode\", \"marker\"],\n isOptional: !0\n },\n requiresSeparator: !0\n }, {\n name: \"-femit-all-decls\",\n description: \"Emit all declarations, even if unused\"\n }, {\n name: \"-femulated-tls\",\n description: \"Use emutls functions to access thread_local variables\"\n }, {\n name: \"-fenable-matrix\",\n description: \"Enable matrix data type and related builtin functions\"\n }, {\n name: \"-fescaping-block-tail-calls\"\n }, {\n name: \"-fexceptions\",\n description: \"Enable support for exception handling\"\n }, {\n name: \"-fexperimental-new-constant-interpreter\",\n description: \"Enable the experimental new constant interpreter\"\n }, {\n name: \"-fexperimental-relative-c++-abi-vtables\",\n description: \"Use the experimental C++ class ABI for classes with virtual tables\"\n }, {\n name: \"-fexperimental-strict-floating-point\",\n description: \"Enables experimental strict floating point in LLVM\"\n }, {\n name: \"-ffast-math\",\n description: \"Allow aggressive, lossy floating-point optimizations\"\n }, {\n name: \"-ffile-prefix-map\",\n description: \"Remap file source paths in debug info and predefined preprocessor macros\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-ffine-grained-bitfield-accesses\",\n description: \"Use separate accesses for consecutive bitfield runs with legal widths and alignments\"\n }, {\n name: \"-ffinite-loops\",\n description: \"Assume all loops are finite\"\n }, {\n name: 
\"-ffinite-math-only\"\n }, {\n name: \"-ffixed-point\",\n description: \"Enable fixed point types\"\n }, {\n name: \"-ffixed-r19\",\n description: \"Reserve register r19 (Hexagon only)\"\n }, {\n name: \"-ffixed-r9\",\n description: \"Reserve the r9 register (ARM only)\"\n }, {\n name: \"-ffixed-x10\",\n description: \"Reserve the x10 register (AArch64/RISC-V only)\"\n }, {\n name: \"-ffixed-x11\",\n description: \"Reserve the x11 register (AArch64/RISC-V only)\"\n }, {\n name: \"-ffixed-x12\",\n description: \"Reserve the x12 register (AArch64/RISC-V only)\"\n }, {\n name: \"-ffixed-x13\",\n description: \"Reserve the x13 register (AArch64/RISC-V only)\"\n }, {\n name: \"-ffixed-x14\",\n description: \"Reserve the x14 register (AArch64/RISC-V only)\"\n }, {\n name: \"-ffixed-x15\",\n description: \"Reserve the x15 register (AArch64/RISC-V only)\"\n }, {\n name: \"-ffixed-x16\",\n description: \"Reserve the x16 register (AArch64/RISC-V only)\"\n }, {\n name: \"-ffixed-x17\",\n description: \"Reserve the x17 register (AArch64/RISC-V only)\"\n }, {\n name: \"-ffixed-x18\",\n description: \"Reserve the x18 register (AArch64/RISC-V only)\"\n }, {\n name: \"-ffixed-x19\",\n description: \"Reserve the x19 register (AArch64/RISC-V only)\"\n }, {\n name: \"-ffixed-x1\",\n description: \"Reserve the x1 register (AArch64/RISC-V only)\"\n }, {\n name: \"-ffixed-x20\",\n description: \"Reserve the x20 register (AArch64/RISC-V only)\"\n }, {\n name: \"-ffixed-x21\",\n description: \"Reserve the x21 register (AArch64/RISC-V only)\"\n }, {\n name: \"-ffixed-x22\",\n description: \"Reserve the x22 register (AArch64/RISC-V only)\"\n }, {\n name: \"-ffixed-x23\",\n description: \"Reserve the x23 register (AArch64/RISC-V only)\"\n }, {\n name: \"-ffixed-x24\",\n description: \"Reserve the x24 register (AArch64/RISC-V only)\"\n }, {\n name: \"-ffixed-x25\",\n description: \"Reserve the x25 register (AArch64/RISC-V only)\"\n }, {\n name: \"-ffixed-x26\",\n description: \"Reserve the x26 register (AArch64/RISC-V only)\"\n }, {\n name: \"-ffixed-x27\",\n description: \"Reserve the x27 register (AArch64/RISC-V only)\"\n }, {\n name: \"-ffixed-x28\",\n description: \"Reserve the x28 register (AArch64/RISC-V only)\"\n }, {\n name: \"-ffixed-x29\",\n description: \"Reserve the x29 register (AArch64/RISC-V only)\"\n }, {\n name: \"-ffixed-x2\",\n description: \"Reserve the x2 register (AArch64/RISC-V only)\"\n }, {\n name: \"-ffixed-x30\",\n description: \"Reserve the x30 register (AArch64/RISC-V only)\"\n }, {\n name: \"-ffixed-x31\",\n description: \"Reserve the x31 register (AArch64/RISC-V only)\"\n }, {\n name: \"-ffixed-x3\",\n description: \"Reserve the x3 register (AArch64/RISC-V only)\"\n }, {\n name: \"-ffixed-x4\",\n description: \"Reserve the x4 register (AArch64/RISC-V only)\"\n }, {\n name: \"-ffixed-x5\",\n description: \"Reserve the x5 register (AArch64/RISC-V only)\"\n }, {\n name: \"-ffixed-x6\",\n description: \"Reserve the x6 register (AArch64/RISC-V only)\"\n }, {\n name: \"-ffixed-x7\",\n description: \"Reserve the x7 register (AArch64/RISC-V only)\"\n }, {\n name: \"-ffixed-x8\",\n description: \"Reserve the x8 register (AArch64/RISC-V only)\"\n }, {\n name: \"-ffixed-x9\",\n description: \"Reserve the x9 register (AArch64/RISC-V only)\"\n }, {\n name: \"-fforce-dwarf-frame\",\n description: \"Always emit a debug frame section\"\n }, {\n name: \"-fforce-emit-vtables\",\n description: \"Emits more virtual tables to improve devirtualization\"\n }, {\n name: \"-fforce-enable-int128\",\n 
description: \"Enable support for int128_t type\"\n }, {\n name: \"-ffp-contract\",\n description: \"Form fused FP ops (e.g. FMAs): fast (fuses across statements disregarding pragmas) | on (only fuses in the same statement unless dictated by pragmas) | off (never fuses) | fast-honor-pragmas (fuses across statements unless diectated by pragmas). Default is 'fast' for CUDA, 'fast-honor-pragmas' for HIP, and 'on' otherwise\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-ffp-exception-behavior\",\n description: \"Specifies the exception behavior of floating-point operations\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-ffp-model\",\n description: \"Controls the semantics of floating-point calculations\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-ffreestanding\",\n description: \"Assert that the compilation takes place in a freestanding environment\"\n }, {\n name: \"-ffunction-sections\",\n description: \"Place each function in its own section\"\n }, {\n name: \"-fglobal-isel\",\n description: \"Enables the global instruction selector\"\n }, {\n name: \"-fgnu-inline-asm\"\n }, {\n name: \"-fgnu-keywords\",\n description: \"Allow GNU-extension keywords regardless of language standard\"\n }, {\n name: \"-fgnu-runtime\",\n description: \"Generate output compatible with the standard GNU Objective-C runtime\"\n }, {\n name: \"-fgnu89-inline\",\n description: \"Use the gnu89 inline semantics\"\n }, {\n name: \"-fgnuc-version\",\n description: \"Sets various macros to claim compatibility with the given GCC version (default is 4.2.1)\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fgpu-allow-device-init\",\n description: \"Allow device side init function in HIP\"\n }, {\n name: \"-fgpu-defer-diag\",\n description: \"Defer host/device related diagnostic messages for CUDA/HIP\"\n }, {\n name: \"-fgpu-rdc\",\n description: \"Generate relocatable device code, also known as separate compilation mode\"\n }, {\n name: \"-fhip-new-launch-api\",\n description: \"Use new kernel launching API for HIP\"\n }, {\n name: \"-fignore-exceptions\",\n description: \"Enable support for ignoring exception handling constructs\"\n }, {\n name: \"-fimplicit-module-maps\",\n description: \"Implicitly search the file system for module map files\"\n }, {\n name: \"-fimplicit-modules\"\n }, {\n name: \"-finline-functions\",\n description: \"Inline suitable functions\"\n }, {\n name: \"-finline-hint-functions\",\n description: \"Inline functions which are (explicitly or implicitly) marked inline\"\n }, {\n name: \"-finstrument-function-entry-bare\",\n description: \"Instrument function entry only, after inlining, without arguments to the instrumentation call\"\n }, {\n name: \"-finstrument-functions-after-inlining\",\n description: \"Like -finstrument-functions, but insert the calls after inlining\"\n }, {\n name: \"-finstrument-functions\",\n description: \"Generate calls to instrument function entry and exit\"\n }, {\n name: \"-fintegrated-as\",\n description: \"Enable the integrated assembler\"\n }, {\n name: \"-fintegrated-cc1\",\n description: \"Run cc1 in-process\"\n }, {\n name: \"-fjump-tables\",\n description: \"Use jump tables for lowering switches\"\n }, {\n name: \"-fkeep-static-consts\",\n description: \"Keep static const variables if unused\"\n }, {\n name: \"-flax-vector-conversions\",\n description: \"Enable implicit vector bit-casts\",\n args: {\n name: \"value\"\n },\n requiresSeparator: 
!0\n }, {\n name: \"-flegacy-pass-manager\",\n description: \"Use the legacy pass manager in LLVM\"\n }, {\n name: \"-flto-jobs\",\n description: \"Controls the backend parallelism of -flto=thin (default of 0 means the number of threads will be derived from the number of CPUs detected)\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-flto\",\n description: \"Set LTO mode to either 'full' or 'thin'\",\n args: {\n name: \"value\",\n suggestions: [\"full\", \"thin\"],\n isOptional: !0,\n default: \"full\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fmacro-prefix-map\",\n description: \"Remap file source paths in predefined preprocessor macros\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fmath-errno\",\n description: \"Require math functions to indicate errors by setting errno\"\n }, {\n name: \"-fmax-tokens\",\n description: \"Max total number of preprocessed tokens for -Wmax-tokens\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fmax-type-align\",\n description: \"Specify the maximum alignment to enforce on pointers lacking an explicit alignment\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fmemory-profile\",\n description: \"Enable heap memory profiling and dump results into \",\n args: {\n name: \"directory\",\n isOptional: !0\n },\n requiresSeparator: !0\n }, {\n name: \"-fmerge-all-constants\",\n description: \"Allow merging of constants\"\n }, {\n name: \"-fmessage-length\",\n description: \"Format message diagnostics so that they fit within N columns\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fmodule-file\",\n description: \"[=] Specify the mapping of module name to precompiled module file, or load a module file if name is omitted\",\n requiresSeparator: !0\n }, {\n name: \"-fmodule-map-file\",\n description: \"Load this module map file\",\n args: {\n name: \"file\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fmodule-name\",\n description: \"Specify the name of the module to build\",\n args: {\n name: \"name\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fmodules-cache-path\",\n description: \"Specify the module cache path\",\n args: {\n name: \"directory\",\n template: \"folders\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fmodules-decluse\",\n description: \"Require declaration of modules used within a module\"\n }, {\n name: \"-fmodules-disable-diagnostic-validation\",\n description: \"Disable validation of the diagnostic options when loading the module\"\n }, {\n name: \"-fmodules-ignore-macro\",\n description: \"Ignore the definition of the given macro when building and loading modules\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fmodules-prune-after\",\n description: \"Specify the interval (in seconds) after which a module file will be considered unused\",\n args: {\n name: \"seconds\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fmodules-prune-interval\",\n description: \"Specify the interval (in seconds) between attempts to prune the module cache\",\n args: {\n name: \"seconds\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fmodules-search-all\",\n description: \"Search even non-imported modules to resolve references\"\n }, {\n name: \"-fmodules-strict-decluse\",\n description: \"Like -fmodules-decluse but requires all headers to be in modules\"\n }, {\n name: \"-fmodules-ts\",\n description: \"Enable support for the C++ Modules TS\"\n }, {\n name: 
\"-fmodules-user-build-path\",\n description: \"Specify the module user build path\",\n args: {\n name: \"directory\",\n template: \"folders\"\n }\n }, {\n name: \"-fmodules-validate-input-files-content\",\n description: \"Validate PCM input files based on content if mtime differs\"\n }, {\n name: \"-fmodules-validate-once-per-build-session\",\n description: \"Don't verify input files for the modules if the module has been successfully validated or loaded during this build session\"\n }, {\n name: \"-fmodules-validate-system-headers\",\n description: \"Validate the system headers that a module depends on when loading the module\"\n }, {\n name: \"-fmodules\",\n description: \"Enable the 'modules' language feature\"\n }, {\n name: \"-fms-compatibility-version\",\n description: \"Dot-separated value representing the Microsoft compiler version number to report in _MSC_VER (0 = don't define it (default))\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fms-compatibility\",\n description: \"Enable full Microsoft Visual C++ compatibility\"\n }, {\n name: \"-fms-extensions\",\n description: \"Accept some non-standard constructs supported by the Microsoft compiler\"\n }, {\n name: \"-fmsc-version\",\n description: \"Microsoft compiler version number to report in _MSC_VER (0 = don't define it (default))\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fnew-alignment\",\n description: \"Specifies the largest alignment guaranteed by '::operator new(size_t)'\",\n args: {\n name: \"align\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fno-aapcs-bitfield-width\",\n description: \"Do not follow the AAPCS standard requirement stating that volatile bit-field width is dictated by the field container type. (ARM only)\"\n }, {\n name: \"-fno-access-control\",\n description: \"Disable C++ access control\"\n }, {\n name: \"-fno-addrsig\",\n description: \"Don't emit an address-significance table\"\n }, {\n name: \"-fno-allow-editor-placeholders\"\n }, {\n name: \"-fno-apinotes-modules\",\n description: \"Disable module-based external API notes support\"\n }, {\n name: \"-fno-apinotes\",\n description: \"Disable external API notes support\"\n }, {\n name: \"-fno-apple-pragma-pack\"\n }, {\n name: \"-fno-application-extension\"\n }, {\n name: \"-fno-asm-blocks\"\n }, {\n name: \"-fno-assume-sane-operator-new\",\n description: \"Don't assume that C++'s global operator new can't alias any pointer\"\n }, {\n name: \"-fno-autolink\",\n description: \"Disable generation of linker directives for automatic library linking\"\n }, {\n name: \"-fno-blocks\"\n }, {\n name: \"-fno-borland-extensions\"\n }, {\n name: \"-fno-builtin-value\",\n description: \"Disable implicit builtin knowledge of a specific function\",\n args: {\n name: \"\"\n }\n }, {\n name: \"-fno-builtin\",\n description: \"Disable implicit builtin knowledge of functions\"\n }, {\n name: \"-fno-c++-static-destructors\",\n description: \"Disable C++ static destructor registration\"\n }, {\n name: \"-fno-caret-diagnostics\"\n }, {\n name: \"-fno-char8_t\",\n description: \"Disable C++ builtin type char8_t\"\n }, {\n name: \"-fno-color-diagnostics\",\n description: \"Disable colors in diagnostics\"\n }, {\n name: \"-fno-common\",\n description: \"Compile common globals like normal definitions\"\n }, {\n name: \"-fno-complete-member-pointers\",\n description: \"Do not require member pointer base types to be complete if they would be significant under the Microsoft ABI\"\n }, {\n name: 
\"-fno-constant-cfstrings\",\n description: \"Disable creation of CodeFoundation-type constant strings\"\n }, {\n name: \"-fno-constant-nsarray-literals\",\n description: \"Disable creation of CodeFoundation-type constant initializer `NSArray`s from array literals\"\n }, {\n name: \"-fno-constant-nsdictionary-literals\",\n description: \"Disable creation of CodeFoundation-type constant initializer `NSDictionary`s from dictionary literals\"\n }, {\n name: \"-fno-constant-nsnumber-literals\",\n description: \"Disable creation of CodeFoundation-type constant initializer `NSNumber`s from number literals\"\n }, {\n name: \"-fno-coroutines-ts\"\n }, {\n name: \"-fno-coverage-mapping\",\n description: \"Disable code coverage analysis\"\n }, {\n name: \"-fno-crash-diagnostics\",\n description: \"Disable auto-generation of preprocessed source files and a script for reproduction during a clang crash\"\n }, {\n name: \"-fno-cuda-approx-transcendentals\",\n description: \"Don't use approximate transcendental functions\"\n }, {\n name: \"-fno-cuda-short-ptr\"\n }, {\n name: \"-fno-cxx-exceptions\"\n }, {\n name: \"-fno-cxx-modules\",\n description: \"Disable modules for C++\"\n }, {\n name: \"-fno-data-sections\"\n }, {\n name: \"-fno-debug-info-for-profiling\"\n }, {\n name: \"-fno-debug-macro\",\n description: \"Do not emit macro debug information\"\n }, {\n name: \"-fno-debug-ranges-base-address\"\n }, {\n name: \"-fno-declspec\",\n description: \"Disallow __declspec as a keyword\"\n }, {\n name: \"-fno-delayed-template-parsing\",\n description: \"Disable delayed template parsing\"\n }, {\n name: \"-fno-delete-null-pointer-checks\",\n description: \"Do not treat usage of null pointers as undefined behavior\"\n }, {\n name: \"-fno-diagnostics-fixit-info\",\n description: \"Do not include fixit information in diagnostics\"\n }, {\n name: \"-fno-diagnostics-show-hotness\"\n }, {\n name: \"-fno-digraphs\",\n description: \"Disallow alternative token representations '<:', ':>', '<%', '%>', '%:', '%:%:'\"\n }, {\n name: \"-fno-discard-value-names\",\n description: \"Do not discard value names in LLVM IR\"\n }, {\n name: \"-fno-dollars-in-identifiers\",\n description: \"Disallow '$' in identifiers\"\n }, {\n name: \"-fno-double-square-bracket-attributes\",\n description: \"Disable '[[]]' attributes in all C and C++ language modes\"\n }, {\n name: \"-fno-dwarf-directory-asm\"\n }, {\n name: \"-fno-elide-constructors\",\n description: \"Disable C++ copy constructor elision\"\n }, {\n name: \"-fno-elide-type\",\n description: \"Do not elide types when printing diagnostics\"\n }, {\n name: \"-fno-eliminate-unused-debug-types\",\n description: \"Emit debug info for defined but unused types\"\n }, {\n name: \"-fno-escaping-block-tail-calls\"\n }, {\n name: \"-fno-exceptions\",\n description: \"Disable support for exception handling\"\n }, {\n name: \"-fno-experimental-relative-c++-abi-vtables\",\n description: \"Do not use the experimental C++ class ABI for classes with virtual tables\"\n }, {\n name: \"-fno-fast-math\"\n }, {\n name: \"-fno-fine-grained-bitfield-accesses\",\n description: \"Use large-integer access for consecutive bitfield runs\"\n }, {\n name: \"-fno-finite-loops\",\n description: \"Do not assume that any loop is finite\"\n }, {\n name: \"-fno-finite-math-only\"\n }, {\n name: \"-fno-fixed-point\",\n description: \"Disable fixed point types\"\n }, {\n name: \"-fno-force-dwarf-frame\"\n }, {\n name: \"-fno-force-emit-vtables\"\n }, {\n name: \"-fno-force-enable-int128\",\n description: 
\"Disable support for int128_t type\"\n }, {\n name: \"-fno-function-sections\"\n }, {\n name: \"-fno-global-isel\",\n description: \"Disables the global instruction selector\"\n }, {\n name: \"-fno-gnu-inline-asm\",\n description: \"Disable GNU style inline asm\"\n }, {\n name: \"-fno-gnu89-inline\"\n }, {\n name: \"-fno-gpu-allow-device-init\",\n description: \"Don't allow device side init function in HIP\"\n }, {\n name: \"-fno-gpu-defer-diag\",\n description: \"Don't defer host/device related diagnostic messages for CUDA/HIP\"\n }, {\n name: \"-fno-gpu-rdc\"\n }, {\n name: \"-fno-hip-new-launch-api\",\n description: \"Don't use new kernel launching API for HIP\"\n }, {\n name: \"-fno-implicit-modules\"\n }, {\n name: \"-fno-integrated-as\",\n description: \"Disable the integrated assembler\"\n }, {\n name: \"-fno-integrated-cc1\",\n description: \"Spawn a separate process for each cc1\"\n }, {\n name: \"-fno-jump-tables\",\n description: \"Do not use jump tables for lowering switches\"\n }, {\n name: \"-fno-keep-static-consts\",\n description: \"Don't keep static const variables if unused\"\n }, {\n name: \"-fno-legacy-pass-manager\",\n description: \"Use the new pass manager in LLVM\"\n }, {\n name: \"-fno-lto\",\n description: \"Disable LTO mode (default)\"\n }, {\n name: \"-fno-math-errno\"\n }, {\n name: \"-fno-memory-profile\",\n description: \"Disable heap memory profiling\"\n }, {\n name: \"-fno-merge-all-constants\",\n description: \"Disallow merging of constants\"\n }, {\n name: \"-fno-modules-validate-system-headers\"\n }, {\n name: \"-fno-objc-arc-exceptions\"\n }, {\n name: \"-fno-objc-convert-messages-to-runtime-calls\"\n }, {\n name: \"-fno-objc-encode-cxx-class-template-spec\"\n }, {\n name: \"-fno-objc-exceptions\"\n }, {\n name: \"-fno-objc-infer-related-result-type\",\n description: \"Do not infer Objective-C related result type based on method family\"\n }, {\n name: \"-fno-operator-names\",\n description: \"Do not treat C++ operator name keywords as synonyms for operators\"\n }, {\n name: \"-fno-pascal-strings\"\n }, {\n name: \"-fno-pch-codegen\",\n description: \"Do not generate code for uses of this PCH that assumes an explicit object file will be built for the PCH\"\n }, {\n name: \"-fno-pch-debuginfo\",\n description: \"Do not generate debug info for types in an object file built from this PCH and do not generate them elsewhere\"\n }, {\n name: \"-fno-pch-instantiate-templates\"\n }, {\n name: \"-fno-plt\",\n description: \"Use GOT indirection instead of PLT to make external function calls (x86 only)\"\n }, {\n name: \"-fno-prebuilt-implicit-modules\"\n }, {\n name: \"-fno-preserve-as-comments\",\n description: \"Do not preserve comments in inline assembly\"\n }, {\n name: \"-fno-profile-arcs\"\n }, {\n name: \"-fno-profile-generate\",\n description: \"Disable generation of profile instrumentation\"\n }, {\n name: \"-fno-profile-instr-generate\",\n description: \"Disable generation of profile instrumentation\"\n }, {\n name: \"-fno-profile-instr-use\",\n description: \"Disable using instrumentation data for profile-guided optimization\"\n }, {\n name: \"-fno-pseudo-probe-for-profiling\",\n description: \"Do not emit pseudo probes for sample profiler\"\n }, {\n name: \"-fno-ptrauth-abi-version\",\n description: \"Disable Pointer Authentication ABI versioning\"\n }, {\n name: \"-fno-ptrauth-kernel-abi-version\",\n description: \"Disable Pointer Authentication kernel ABI versioning\"\n }, {\n name: \"-fno-reciprocal-math\"\n }, {\n name: 
\"-fno-register-global-dtors-with-atexit\",\n description: \"Don't use atexit or __cxa_atexit to register global destructors\"\n }, {\n name: \"-fno-relaxed-template-template-args\"\n }, {\n name: \"-fno-reroll-loops\"\n }, {\n name: \"-fno-rewrite-imports\"\n }, {\n name: \"-fno-rewrite-includes\"\n }, {\n name: \"-fno-ropi\"\n }, {\n name: \"-fno-rtlib-add-rpath\",\n description: \"Do not add -rpath with architecture-specific resource directory to the linker flags\"\n }, {\n name: \"-fno-rtti-data\",\n description: \"Disable generation of RTTI data\"\n }, {\n name: \"-fno-rtti\",\n description: \"Disable generation of rtti information\"\n }, {\n name: \"-fno-rwpi\"\n }, {\n name: \"-fno-sanitize-address-poison-custom-array-cookie\",\n description: \"Disable poisoning array cookies when using custom operator new[] in AddressSanitizer\"\n }, {\n name: \"-fno-sanitize-address-use-after-scope\",\n description: \"Disable use-after-scope detection in AddressSanitizer\"\n }, {\n name: \"-fno-sanitize-address-use-odr-indicator\",\n description: \"Disable ODR indicator globals\"\n }, {\n name: \"-fno-sanitize-blacklist\",\n description: \"Don't use blacklist file for sanitizers\"\n }, {\n name: \"-fno-sanitize-cfi-canonical-jump-tables\",\n description: \"Do not make the jump table addresses canonical in the symbol table\"\n }, {\n name: \"-fno-sanitize-cfi-cross-dso\",\n description: \"Disable control flow integrity (CFI) checks for cross-DSO calls\"\n }, {\n name: \"-fno-sanitize-coverage\",\n description: \"Disable specified features of coverage instrumentation for Sanitizers\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fno-sanitize-memory-track-origins\",\n description: \"Disable origins tracking in MemorySanitizer\"\n }, {\n name: \"-fno-sanitize-memory-use-after-dtor\",\n description: \"Disable use-after-destroy detection in MemorySanitizer\"\n }, {\n name: \"-fno-sanitize-minimal-runtime\"\n }, {\n name: \"-fno-sanitize-recover\",\n description: \"Disable recovery for specified sanitizers\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fno-sanitize-stats\",\n description: \"Disable sanitizer statistics gathering\"\n }, {\n name: \"-fno-sanitize-thread-atomics\",\n description: \"Disable atomic operations instrumentation in ThreadSanitizer\"\n }, {\n name: \"-fno-sanitize-thread-func-entry-exit\",\n description: \"Disable function entry/exit instrumentation in ThreadSanitizer\"\n }, {\n name: \"-fno-sanitize-thread-memory-access\",\n description: \"Disable memory access instrumentation in ThreadSanitizer\"\n }, {\n name: \"-fno-sanitize-trap\",\n description: \"Disable trapping for sanitizers\",\n args: {\n name: \"value\",\n isOptional: !0\n },\n requiresSeparator: !0\n }, {\n name: \"-fno-semantic-interposition\"\n }, {\n name: \"-fno-short-enums\"\n }, {\n name: \"-fno-short-wchar\",\n description: \"Force wchar_t to be an unsigned int\"\n }, {\n name: \"-fno-show-column\",\n description: \"Do not include column number on diagnostics\"\n }, {\n name: \"-fno-show-source-location\",\n description: \"Do not include source location information with diagnostics\"\n }, {\n name: \"-fno-signed-char\",\n description: \"Char is unsigned\"\n }, {\n name: \"-fno-signed-zeros\",\n description: \"Allow optimizations that ignore the sign of floating point zeros\"\n }, {\n name: \"-fno-sized-deallocation\"\n }, {\n name: \"-fno-spell-checking\",\n description: \"Disable spell-checking\"\n }, {\n name: \"-fno-split-dwarf-inlining\"\n }, {\n 
name: \"-fno-split-lto-unit\"\n }, {\n name: \"-fno-split-machine-functions\",\n description: \"Disable late function splitting using profile information (x86 ELF)\"\n }, {\n name: \"-fno-stack-check\",\n description: \"Disable stack checking\"\n }, {\n name: \"-fno-stack-clash-protection\",\n description: \"Disable stack clash protection\"\n }, {\n name: \"-fno-stack-protector\",\n description: \"Disable the use of stack protectors\"\n }, {\n name: \"-fno-stack-size-section\"\n }, {\n name: \"-fno-standalone-debug\",\n description: \"Limit debug information produced to reduce size of debug binary\"\n }, {\n name: \"-fno-strict-float-cast-overflow\",\n description: \"Relax language rules and try to match the behavior of the target's native float-to-int conversion instructions\"\n }, {\n name: \"-fno-strict-return\",\n description: \"Don't treat control flow paths that fall off the end of a non-void function as unreachable\"\n }, {\n name: \"-fno-strict-vtable-pointers\"\n }, {\n name: \"-fno-sycl\",\n description: \"Disable SYCL kernels compilation for device\"\n }, {\n name: \"-fno-temp-file\",\n description: \"Directly create compilation output files. This may lead to incorrect incremental builds if the compiler crashes\"\n }, {\n name: \"-fno-test-coverage\"\n }, {\n name: \"-fno-threadsafe-statics\",\n description: \"Do not emit code to make initialization of local statics thread safe\"\n }, {\n name: \"-fno-trigraphs\",\n description: \"Do not process trigraph sequences\"\n }, {\n name: \"-fno-unique-basic-block-section-names\"\n }, {\n name: \"-fno-unique-internal-linkage-names\"\n }, {\n name: \"-fno-unique-section-names\",\n description: \"Don't use unique names for text and data sections\"\n }, {\n name: \"-fno-unroll-loops\",\n description: \"Turn off loop unroller\"\n }, {\n name: \"-fno-use-cxa-atexit\",\n description: \"Don't use __cxa_atexit for calling destructors\"\n }, {\n name: \"-fno-use-init-array\",\n description: \"Use .ctors/.dtors instead of .init_array/.fini_array\"\n }, {\n name: \"-fno-use-line-directives\"\n }, {\n name: \"-fno-virtual-function-elimination\"\n }, {\n name: \"-fno-visibility-from-dllstorageclass\"\n }, {\n name: \"-fno-visibility-inlines-hidden-static-local-var\",\n description: \"Disables -fvisibility-inlines-hidden-static-local-var (this is the default on non-darwin targets)\"\n }, {\n name: \"-fno-whole-program-vtables\"\n }, {\n name: \"-fno-xray-always-emit-customevents\"\n }, {\n name: \"-fno-xray-always-emit-typedevents\"\n }, {\n name: \"-fno-xray-function-index\",\n description: \"Omit function index section at the expense of single-function patching performance\"\n }, {\n name: \"-fno-xray-ignore-loops\"\n }, {\n name: \"-fno-xray-instrument\"\n }, {\n name: \"-fno-zero-initialized-in-bss\",\n description: \"Don't place zero initialized data in BSS\"\n }, {\n name: \"-fno-zvector\"\n }, {\n name: \"-fobjc-arc-exceptions\",\n description: \"Use EH-safe code when synthesizing retains and releases in -fobjc-arc\"\n }, {\n name: \"-fobjc-arc\",\n description: \"Synthesize retain and release calls for Objective-C pointers\"\n }, {\n name: \"-fobjc-convert-messages-to-runtime-calls\"\n }, {\n name: \"-fobjc-disable-direct-methods-for-testing\",\n description: \"Ignore attribute objc_direct so that direct methods can be tested\"\n }, {\n name: \"-fobjc-encode-cxx-class-template-spec\",\n description: \"Fully encode c++ class template specialization\"\n }, {\n name: \"-fobjc-exceptions\",\n description: \"Enable Objective-C exceptions\"\n }, 
{\n name: \"-fobjc-infer-related-result-type\"\n }, {\n name: \"-fobjc-relative-method-lists\",\n description: \"Enable relative method lists\"\n }, {\n name: \"-fobjc-runtime\",\n description: \"Specify the target Objective-C runtime kind and version\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fobjc-weak\",\n description: \"Enable ARC-style weak references in Objective-C\"\n }, {\n name: \"-fopenmp-simd\",\n description: \"Emit OpenMP code only for SIMD-based constructs\"\n }, {\n name: \"-fopenmp-targets\",\n description: \"Specify comma-separated list of triples OpenMP offloading targets to be supported\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fopenmp\",\n description: \"Parse OpenMP pragmas and generate parallel code\"\n }, {\n name: \"-foptimization-record-file\",\n description: \"Specify the output name of the file containing the optimization remarks. Implies -fsave-optimization-record. On Darwin platforms, this cannot be used with multiple -arch options\",\n args: {\n name: \"file\"\n },\n requiresSeparator: !0\n }, {\n name: \"-foptimization-record-passes\",\n description: \"Only include passes which match a specified regular expression in the generated optimization record (by default, include all passes)\",\n args: {\n name: \"regex\"\n },\n requiresSeparator: !0\n }, {\n name: \"-forder-file-instrumentation\",\n description: \"Generate instrumented code to collect order file into default.profraw file (overridden by '=' form of option or LLVM_PROFILE_FILE env var)\"\n }, {\n name: \"-fpack-struct\",\n description: \"Specify the default maximum struct packing alignment\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fpascal-strings\",\n description: \"Recognize and construct Pascal-style string literals\"\n }, {\n name: \"-fpass-plugin\",\n description: \"Load pass plugin from a dynamic shared object file (only with new pass manager)\",\n args: {\n name: \"dsopath\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fpatchable-function-entry\",\n description: \"Generate M NOPs before function entry and N-M NOPs after function entry\",\n args: {\n name: \"N,M\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fpcc-struct-return\",\n description: \"Override the default ABI to return all structs on the stack\"\n }, {\n name: \"-fpch-codegen\",\n description: \"Generate code for uses of this PCH that assumes an explicit object file will be built for the PCH\"\n }, {\n name: \"-fpch-debuginfo\",\n description: \"Generate debug info for types in an object file built from this PCH and do not generate them elsewhere\"\n }, {\n name: \"-fpch-instantiate-templates\",\n description: \"Instantiate templates already while building a PCH\"\n }, {\n name: \"-fpch-validate-input-files-content\",\n description: \"Validate PCH input files based on content if mtime differs\"\n }, {\n name: \"-fplt\"\n }, {\n name: \"-fplugin\",\n description: \"Load the named plugin (dynamic shared object)\",\n args: {\n name: \"dsopath\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fprebuilt-implicit-modules\",\n description: \"Look up implicit modules in the prebuilt module path\"\n }, {\n name: \"-fprebuilt-module-path\",\n description: \"Specify the prebuilt module path\",\n args: {\n name: \"directory\",\n template: \"folders\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fpreserve-as-comments\"\n }, {\n name: \"-fproc-stat-report\",\n description: \"Save subprocess statistics to the given file\",\n args: {\n name: 
\"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fproc-stat-reportvalue\",\n description: \"Print subprocess statistics\",\n args: {\n name: \"\"\n }\n }, {\n name: \"-fprofile-arcs\"\n }, {\n name: \"-fprofile-exclude-files\",\n description: \"Instrument only functions from files where names don't match all the regexes separated by a semi-colon\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fprofile-filter-files\",\n description: \"Instrument only functions from files where names match any regex separated by a semi-colon\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fprofile-generate\",\n description: \"Generate instrumented code to collect execution counts into /default.profraw (overridden by LLVM_PROFILE_FILE env var)\",\n args: {\n name: \"directory\",\n isOptional: !0\n },\n requiresSeparator: !0\n }, {\n name: \"-fprofile-instr-generate\",\n description: \"Generate instrumented code to collect execution counts into (overridden by LLVM_PROFILE_FILE env var)\",\n args: {\n name: \"file\",\n isOptional: !0,\n template: \"filepaths\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fprofile-instr-use\",\n description: \"Use instrumentation data for profile-guided optimization\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fprofile-remapping-file\",\n description: \"Use the remappings described in to match the profile data against names in the program\",\n args: {\n name: \"file\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fprofile-sample-accurate\",\n description: \"Specifies that the sample profile is accurate\"\n }, {\n name: \"-fprofile-sample-use\",\n description: \"Enable sample-based profile guided optimizations\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fprofile-update\",\n description: \"Set update method of profile counters (atomic,prefer-atomic,single)\",\n args: {\n name: \"method\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fprofile-use\",\n description: \"Use instrumentation data for profile-guided optimization. If pathname is a directory, it reads from /default.profdata. Otherwise, it reads from file \",\n args: {\n name: \"pathname\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fpseudo-probe-for-profiling\",\n description: \"Emit pseudo probes for sample profiler\"\n }, {\n name: \"-fptrauth-abi-version\",\n description: \"Pointer Authentication ABI version\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fptrauth-auth-traps\",\n description: \"Enable traps on authentication failures\"\n }, {\n name: \"-fptrauth-block-descriptor-pointers\",\n description: \"Enable signing block descriptors\"\n }, {\n name: \"-fptrauth-calls\",\n description: \"Enable signing and authentication of all indirect calls\"\n }, {\n name: \"-fptrauth-function-pointer-type-discrimination\",\n description: \"Enabling type discrimination on C function pointers\"\n }, {\n name: \"-fptrauth-indirect-gotos\",\n description: \"Enable signing and authentication of indirect goto targets\"\n }, {\n name: \"-fptrauth-intrinsics\",\n description: \"Enable pointer-authentication intrinsics\"\n }, {\n name: \"-fptrauth-kernel-abi-version\",\n description: \"Enable Pointer Authentication kernel ABI version\"\n }, {\n name: \"-fptrauth-objc-isa-masking\",\n description: \"Pre- and post-authentication masking mode of objective-c isa pointers\"\n }, {\n name: \"-fptrauth-objc-isa-mode\",\n description: \"Authentication mode for ObjC isa field. 
Full auth if unspecified\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fptrauth-objc-isa\",\n description: \"Enable signing and authentication of Objective-C object's 'isa' field\"\n }, {\n name: \"-fptrauth-returns\",\n description: \"Enable signing and authentication of return addresses\"\n }, {\n name: \"-fptrauth-soft\",\n description: \"Enable software lowering of pointer authentication\"\n }, {\n name: \"-fptrauth-vtable-pointer-address-discrimination\",\n description: \"Enable address discrimination of vtable pointers\"\n }, {\n name: \"-fptrauth-vtable-pointer-type-discrimination\",\n description: \"Enable type discrimination of vtable pointers\"\n }, {\n name: \"-freciprocal-math\",\n description: \"Allow division operations to be reassociated\"\n }, {\n name: \"-freg-struct-return\",\n description: \"Override the default ABI to return small structs in registers\"\n }, {\n name: \"-fregister-global-dtors-with-atexit\",\n description: \"Use atexit or __cxa_atexit to register global destructors\"\n }, {\n name: \"-frelaxed-template-template-args\",\n description: \"Enable C++17 relaxed template template argument matching\"\n }, {\n name: \"-freroll-loops\",\n description: \"Turn on loop reroller\"\n }, {\n name: \"-frewrite-imports\"\n }, {\n name: \"-frewrite-includes\"\n }, {\n name: \"-fropi\",\n description: \"Generate read-only position independent code (ARM only)\"\n }, {\n name: \"-frtlib-add-rpath\",\n description: \"Add -rpath with architecture-specific resource directory to the linker flags\"\n }, {\n name: \"-frtti-data\"\n }, {\n name: \"-frtti\"\n }, {\n name: \"-frwpi\",\n description: \"Generate read-write position independent code (ARM only)\"\n }, {\n name: \"-fsanitize-address-field-padding\",\n description: \"Level of field padding for AddressSanitizer\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fsanitize-address-globals-dead-stripping\",\n description: \"Enable linker dead stripping of globals in AddressSanitizer\"\n }, {\n name: \"-fsanitize-address-poison-custom-array-cookie\",\n description: \"Enable poisoning array cookies when using custom operator new[] in AddressSanitizer\"\n }, {\n name: \"-fsanitize-address-use-after-scope\",\n description: \"Enable use-after-scope detection in AddressSanitizer\"\n }, {\n name: \"-fsanitize-address-use-odr-indicator\",\n description: \"Enable ODR indicator globals to avoid false ODR violation reports in partially sanitized programs at the cost of an increase in binary size\"\n }, {\n name: \"-fsanitize-blacklist\",\n description: \"Path to blacklist file for sanitizers\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fsanitize-cfi-canonical-jump-tables\",\n description: \"Make the jump table addresses canonical in the symbol table\"\n }, {\n name: \"-fsanitize-cfi-cross-dso\",\n description: \"Enable control flow integrity (CFI) checks for cross-DSO calls\"\n }, {\n name: \"-fsanitize-cfi-icall-generalize-pointers\",\n description: \"Generalize pointers in CFI indirect call type signature checks\"\n }, {\n name: \"-fsanitize-coverage-allowlist\",\n description: \"Restrict sanitizer coverage instrumentation exclusively to modules and functions that match the provided special case list, except the blocked ones\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fsanitize-coverage-blacklist\",\n description: \"Deprecated, use -fsanitize-coverage-blocklist= instead\",\n args: {\n name: \"value\"\n },\n 
requiresSeparator: !0\n }, {\n name: \"-fsanitize-coverage-blocklist\",\n description: \"Disable sanitizer coverage instrumentation for modules and functions that match the provided special case list, even the allowed ones\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fsanitize-coverage-whitelist\",\n description: \"Deprecated, use -fsanitize-coverage-allowlist= instead\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fsanitize-coverage\",\n description: \"Specify the type of coverage instrumentation for Sanitizers\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fsanitize-hwaddress-abi\",\n description: \"Select the HWAddressSanitizer ABI to target (interceptor or platform, default interceptor). This option is currently unused\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fsanitize-memory-track-origins\",\n description: \"Enable origins tracking in MemorySanitizer\",\n args: {\n name: \"value\",\n isOptional: !0\n },\n requiresSeparator: !0\n }, {\n name: \"-fsanitize-memory-use-after-dtor\",\n description: \"Enable use-after-destroy detection in MemorySanitizer\"\n }, {\n name: \"-fsanitize-minimal-runtime\"\n }, {\n name: \"-fsanitize-recover\",\n description: \"Enable recovery for specified sanitizers\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fsanitize-stats\",\n description: \"Enable sanitizer statistics gathering\"\n }, {\n name: \"-fsanitize-system-blacklist\",\n description: \"Path to system blacklist file for sanitizers\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fsanitize-thread-atomics\",\n description: \"Enable atomic operations instrumentation in ThreadSanitizer (default)\"\n }, {\n name: \"-fsanitize-thread-func-entry-exit\",\n description: \"Enable function entry/exit instrumentation in ThreadSanitizer (default)\"\n }, {\n name: \"-fsanitize-thread-memory-access\",\n description: \"Enable memory access instrumentation in ThreadSanitizer (default)\"\n }, {\n name: \"-fsanitize-trap\",\n description: \"Enable trapping for sanitizers\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fsanitize-undefined-strip-path-components\",\n description: \"Strip (or keep only, if negative) a given number of path components when emitting check metadata\",\n args: {\n name: \"number\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fsanitize\",\n description: \"Turn on runtime checks for various forms of undefined or suspicious behavior. 
See user manual for available checks\",\n args: {\n name: \"check\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fsave-optimization-record\",\n description: \"Generate an optimization record file in a specific format\",\n args: {\n name: \"format\",\n default: \"YAML\",\n isOptional: !0\n },\n requiresSeparator: !0\n }, {\n name: \"-fseh-exceptions\",\n description: \"Use SEH style exceptions\"\n }, {\n name: \"-fsemantic-interposition\"\n }, {\n name: \"-fshort-enums\",\n description: \"Allocate to an enum type only as many bytes as it needs for the declared range of possible values\"\n }, {\n name: \"-fshort-wchar\",\n description: \"Force wchar_t to be a short unsigned int\"\n }, {\n name: \"-fshow-column\"\n }, {\n name: \"-fshow-overloads\",\n description: \"Which overload candidates to show when overload resolution fails: best|all; defaults to all\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fsigned-char\",\n description: \"Char is signed\"\n }, {\n name: \"-fsigned-zeros\"\n }, {\n name: \"-fsized-deallocation\",\n description: \"Enable C++14 sized global deallocation functions\"\n }, {\n name: \"-fsjlj-exceptions\",\n description: \"Use SjLj style exceptions\"\n }, {\n name: \"-fslp-vectorize\",\n description: \"Enable the superword-level parallelism vectorization passes\"\n }, {\n name: \"-fsmall-block-descriptors\",\n description: \"Enable small block descriptors\"\n }, {\n name: \"-fspell-checking\"\n }, {\n name: \"-fsplit-dwarf-inlining\",\n description: \"Provide minimal debug info in the object/executable to facilitate online symbolication/stack traces in the absence of .dwo/.dwp files when using Split DWARF\"\n }, {\n name: \"-fsplit-lto-unit\",\n description: \"Enables splitting of the LTO unit\"\n }, {\n name: \"-fsplit-machine-functions\",\n description: \"Enable late function splitting using profile information (x86 ELF)\"\n }, {\n name: \"-fstack-check\",\n description: \"Enable stack checking\"\n }, {\n name: \"-fstack-clash-protection\",\n description: \"Enable stack clash protection\"\n }, {\n name: \"-fstack-protector-all\",\n description: \"Enable stack protectors for all functions\"\n }, {\n name: \"-fstack-protector-strong\",\n description: \"Enable stack protectors for some functions vulnerable to stack smashing. Compared to -fstack-protector, this uses a stronger heuristic that includes functions containing arrays of any size (and any type), as well as any calls to alloca or the taking of an address from a local variable\"\n }, {\n name: \"-fstack-protector\",\n description: \"Enable stack protectors for some functions vulnerable to stack smashing. This uses a loose heuristic which considers functions vulnerable if they contain a char (or 8bit integer) array or constant sized calls to alloca , which are of greater size than ssp-buffer-size (default: 8 bytes). All variable sized calls to alloca are considered vulnerable. A function with a stack protector has a guard value added to the stack frame that is checked on function exit. The guard value must be positioned in the stack frame such that a buffer overflow from a vulnerable variable will overwrite the guard value before overwriting the function's return address. 
The reference stack guard value is stored in a global variable\"\n }, {\n name: \"-fstack-size-section\",\n description: \"Emit section containing metadata on function stack sizes\"\n }, {\n name: \"-fstandalone-debug\",\n description: \"Emit full debug info for all types used by the program\"\n }, {\n name: \"-fstrict-enums\",\n description: \"Enable optimizations based on the strict definition of an enum's value range\"\n }, {\n name: \"-fstrict-float-cast-overflow\",\n description: \"Assume that overflowing float-to-int casts are undefined (default)\"\n }, {\n name: \"-fstrict-return\"\n }, {\n name: \"-fstrict-vtable-pointers\",\n description: \"Enable optimizations based on the strict rules for overwriting polymorphic C++ objects\"\n }, {\n name: \"-fsycl\",\n description: \"Enable SYCL kernels compilation for device\"\n }, {\n name: \"-fsystem-module\",\n description: \"Build this module as a system module. Only used with -emit-module\"\n }, {\n name: \"-ftarget-variant-availability-checks\",\n description: \"Enable availability checks for the target variant platform\"\n }, {\n name: \"-ftest-coverage\"\n }, {\n name: \"-fthin-link-bitcode\",\n description: \"Write minimized bitcode to for the ThinLTO thin link only\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fthinlto-index\",\n description: \"Perform ThinLTO importing using provided function summary index\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fthreadsafe-statics\"\n }, {\n name: \"-ftime-report\",\n description: '(For new pass manager) \"per-pass\": one report for each pass; \"per-pass-run\": one report for each pass invocation',\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-ftime-trace-granularity\",\n description: \"Minimum time granularity (in microseconds) traced by time profiler\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-ftime-trace\",\n description: \"Turn on time profiler. 
Generates JSON file based on output filename\"\n }, {\n name: \"-ftrap-function\",\n description: \"Issue call to specified function rather than a trap instruction\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-ftrapv-handler\",\n description: \"Specify the function to be called on overflow\",\n args: {\n name: \"functionname\"\n },\n requiresSeparator: !0\n }, {\n name: \"-ftrapv\",\n description: \"Trap on integer overflow\"\n }, {\n name: \"-ftrigraphs\",\n description: \"Process trigraph sequences\"\n }, {\n name: \"-ftrivial-auto-var-init-stop-after\",\n description: \"Stop initializing trivial automatic stack variables after the specified number of instances\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-ftrivial-auto-var-init\",\n description: \"Initialize trivial automatic stack variables: uninitialized (default) | pattern\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-funique-basic-block-section-names\",\n description: \"Use unique names for basic block sections (ELF Only)\"\n }, {\n name: \"-funique-internal-linkage-names\",\n description: \"Uniqueify Internal Linkage Symbol Names by appending the MD5 hash of the module path\"\n }, {\n name: \"-funique-section-names\"\n }, {\n name: \"-funroll-loops\",\n description: \"Turn on loop unroller\"\n }, {\n name: \"-fuse-cxa-atexit\"\n }, {\n name: \"-fuse-init-array\"\n }, {\n name: \"-fuse-line-directives\",\n description: \"Use #line in preprocessed output\"\n }, {\n name: \"-fvalidate-ast-input-files-content\",\n description: \"Compute and store the hash of input files used to build an AST. Files with mismatching mtime's are considered valid if both contents is identical\"\n }, {\n name: \"-fveclib\",\n description: \"Use the given vector functions library\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fvectorize\",\n description: \"Enable the loop vectorization passes\"\n }, {\n name: \"-fverbose-asm\",\n description: \"Generate verbose assembly output\"\n }, {\n name: \"-fvirtual-function-elimination\",\n description: \"Enables dead virtual function elimination optimization. 
Requires -flto=full\"\n }, {\n name: \"-fvisibility-dllexport\",\n description: \"The visibility for dllexport definitions [-fvisibility-from-dllstorageclass]\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fvisibility-externs-dllimport\",\n description: \"The visibility for dllimport external declarations [-fvisibility-from-dllstorageclass]\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fvisibility-externs-nodllstorageclass\",\n description: \"The visibility for external declarations without an explicit DLL dllstorageclass [-fvisibility-from-dllstorageclass]\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fvisibility-from-dllstorageclass\",\n description: \"Set the visibility of symbols in the generated code from their DLL storage class\"\n }, {\n name: \"-fvisibility-global-new-delete-hidden\",\n description: \"Give global C++ operator new and delete declarations hidden visibility\"\n }, {\n name: \"-fvisibility-inlines-hidden-static-local-var\",\n description: \"When -fvisibility-inlines-hidden is enabled, static variables in inline C++ member functions will also be given hidden visibility by default\"\n }, {\n name: \"-fvisibility-inlines-hidden\",\n description: \"Give inline C++ member functions hidden visibility by default\"\n }, {\n name: \"-fvisibility-ms-compat\",\n description: \"Give global types 'default' visibility and global functions and variables 'hidden' visibility by default\"\n }, {\n name: \"-fvisibility-nodllstorageclass\",\n description: \"The visibility for defintiions without an explicit DLL export class [-fvisibility-from-dllstorageclass]\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fvisibility\",\n description: \"Set the default symbol visibility for all global declarations\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fwasm-exceptions\",\n description: \"Use WebAssembly style exceptions\"\n }, {\n name: \"-fwhole-program-vtables\",\n description: \"Enables whole-program vtable optimization. Requires -flto\"\n }, {\n name: \"-fwrapv\",\n description: \"Treat signed integer overflow as two's complement\"\n }, {\n name: \"-fwritable-strings\",\n description: \"Store string literals as writable data\"\n }, {\n name: \"-fxray-always-emit-customevents\",\n description: \"Always emit __xray_customevent(...) calls even if the containing function is not always instrumented\"\n }, {\n name: \"-fxray-always-emit-typedevents\",\n description: \"Always emit __xray_typedevent(...) 
calls even if the containing function is not always instrumented\"\n }, {\n name: \"-fxray-always-instrument\",\n description: \"DEPRECATED: Filename defining the whitelist for imbuing the 'always instrument' XRay attribute\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fxray-attr-list\",\n description: \"Filename defining the list of functions/types for imbuing XRay attributes\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fxray-function-groups\",\n description: \"Only instrument 1 of N groups\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fxray-function-index\"\n }, {\n name: \"-fxray-ignore-loops\",\n description: \"Don't instrument functions with loops unless they also meet the minimum function size\"\n }, {\n name: \"-fxray-instruction-threshold\",\n description: \"Sets the minimum function size to instrument with XRay\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fxray-instrumentation-bundle\",\n description: \"Select which XRay instrumentation points to emit. Options: all, none, function-entry, function-exit, function, custom. Default is 'all'. 'function' includes both 'function-entry' and 'function-exit'\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fxray-instrument\",\n description: \"Generate XRay instrumentation sleds on function entry and exit\"\n }, {\n name: \"-fxray-link-deps\",\n description: \"Tells clang to add the link dependencies for XRay\"\n }, {\n name: \"-fxray-modes\",\n description: \"List of modes to link in by default into XRay instrumented binaries\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fxray-never-instrument\",\n description: \"DEPRECATED: Filename defining the whitelist for imbuing the 'never instrument' XRay attribute\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fxray-selected-function-group\",\n description: \"When using -fxray-function-groups, select which group of functions to instrument. 
Valid range is 0 to fxray-function-groups - 1\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-fzero-initialized-in-bss\"\n }, {\n name: \"-fzvector\",\n description: \"Enable System z vector language extension\"\n }, {\n name: \"-F\",\n description: \"Add directory to framework include search path\",\n args: {\n name: \"value\"\n }\n }, {\n name: \"--gcc-toolchain\",\n description: \"Use the gcc toolchain at the given directory\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-gcodeview-ghash\",\n description: \"Emit type record hashes in a .debug$H section\"\n }, {\n name: \"-gcodeview\",\n description: \"Generate CodeView debug information\"\n }, {\n name: \"-gcolumn-info\"\n }, {\n name: \"-gdwarf-2\",\n description: \"Generate source-level debug information with dwarf version 2\"\n }, {\n name: \"-gdwarf-3\",\n description: \"Generate source-level debug information with dwarf version 3\"\n }, {\n name: \"-gdwarf-4\",\n description: \"Generate source-level debug information with dwarf version 4\"\n }, {\n name: \"-gdwarf-5\",\n description: \"Generate source-level debug information with dwarf version 5\"\n }, {\n name: \"-gdwarf\",\n description: \"Generate source-level debug information with the default dwarf version\"\n }, {\n name: \"-gembed-source\",\n description: \"Embed source text in DWARF debug sections\"\n }, {\n name: \"-ginline-line-tables\"\n }, {\n name: \"-gline-directives-only\",\n description: \"Emit debug line info directives only\"\n }, {\n name: \"-gline-tables-only\",\n description: \"Emit debug line number tables only\"\n }, {\n name: \"-gmodules\",\n description: \"Generate debug info with external references to clang modules or precompiled headers\"\n }, {\n name: \"-gno-codeview-ghash\"\n }, {\n name: \"-gno-column-info\"\n }, {\n name: \"-gno-embed-source\",\n description: \"Restore the default behavior of not embedding source text in DWARF debug sections\"\n }, {\n name: \"-gno-inline-line-tables\",\n description: \"Don't emit inline line tables\"\n }, {\n name: \"--gpu-instrument-lib\",\n description: \"Instrument device library for HIP, which is a LLVM bitcode containing __cyg_profile_func_enter and __cyg_profile_func_exit\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"--gpu-max-threads-per-block\",\n description: \"Default max threads per block for kernel launch bounds for HIP\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-gsplit-dwarf\",\n description: \"Set DWARF fission mode to either 'split' or 'single'\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-gz\",\n description: \"DWARF debug sections compression type\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-G\",\n description: \"Put objects of at most bytes into small data section (MIPS / Hexagon)\",\n args: {\n name: \"size\"\n }\n }, {\n name: \"-g\",\n description: \"Generate source-level debug information\"\n }, {\n name: \"--help-hidden\",\n description: \"Display help for hidden options\"\n }, {\n name: \"-help\",\n description: \"Display available options\"\n }, {\n name: \"--hip-device-lib\",\n description: \"HIP device library\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"--hip-link\",\n description: \"Link clang-offload-bundler bundles for HIP\"\n }, {\n name: \"--hip-version\",\n description: \"HIP version in the format of major.minor.patch\",\n args: {\n name: \"value\"\n },\n 
requiresSeparator: !0\n  }, {\n    name: \"-H\",\n    description: \"Show header includes and nesting depth\"\n  }, {\n    name: \"-I-\",\n    description: \"Restrict all prior -I flags to double-quoted inclusion and remove current directory from include path\"\n  }, {\n    name: \"-iapinotes-modules\",\n    description: \"Add directory to the API notes search path referenced by module name\",\n    args: {\n      name: \"directory\",\n      template: \"folders\"\n    }\n  }, {\n    name: \"-ibuiltininc\",\n    description: \"Enable builtin #include directories even when -nostdinc is used before or after -ibuiltininc. Using -nobuiltininc after the option disables it\"\n  }, {\n    name: \"-idirafter\",\n    description: \"Add directory to AFTER include search path\",\n    args: {\n      name: \"value\"\n    }\n  }, {\n    name: \"-iframeworkwithsysroot\",\n    description: \"Add directory to SYSTEM framework search path, absolute paths are relative to -isysroot\",\n    args: {\n      name: \"directory\",\n      template: \"folders\"\n    }\n  }, {\n    name: \"-iframework\",\n    description: \"Add directory to SYSTEM framework search path\",\n    args: {\n      name: \"value\"\n    }\n  }, {\n    name: \"-imacros\",\n    description: \"Include macros from file before parsing\",\n    args: {\n      name: \"file\"\n    }\n  }, {\n    name: \"-include-pch\",\n    description: \"Include precompiled header file\",\n    args: {\n      name: \"file\"\n    }\n  }, {\n    name: \"-include\",\n    description: \"Include file before parsing\",\n    args: {\n      name: \"file\"\n    }\n  }, {\n    name: \"-index-header-map\",\n    description: \"Make the next included directory (-I or -F) an indexer header map\"\n  }, {\n    name: \"-index-ignore-macros\",\n    description: \"Ignore macros during indexing\"\n  }, {\n    name: \"-index-ignore-system-symbols\",\n    description: \"Ignore symbols from system headers\"\n  }, {\n    name: \"-index-record-codegen-name\",\n    description: \"Record the codegen name for symbols\"\n  }, {\n    name: \"-index-store-path\",\n    description: \"Enable indexing with the specified data store path\",\n    args: {\n      name: \"value\"\n    }\n  }, {\n    name: \"-index-unit-output-path\",\n    description: \"Use <path> as the output path for this compilation unit in the index unit file\",\n    args: {\n      name: \"path\"\n    }\n  }, {\n    name: \"-iprefix\",\n    description: \"Set the -iwithprefix/-iwithprefixbefore prefix\",\n    args: {\n      name: \"dir\",\n      template: \"folders\"\n    }\n  }, {\n    name: \"-iquote\",\n    description: \"Add directory to QUOTE include search path\",\n    args: {\n      name: \"directory\",\n      template: \"folders\"\n    }\n  }, {\n    name: \"-isysroot\",\n    description: \"Set the system root directory (usually /)\",\n    args: {\n      name: \"dir\",\n      template: \"folders\"\n    }\n  }, {\n    name: \"-isystem-after\",\n    description: \"Add directory to end of the SYSTEM include search path\",\n    args: {\n      name: \"directory\",\n      template: \"folders\"\n    }\n  }, {\n    name: \"-isystem\",\n    description: \"Add directory to SYSTEM include search path\",\n    args: {\n      name: \"directory\",\n      template: \"folders\"\n    }\n  }, {\n    name: \"-ivfsoverlay\",\n    description: \"Overlay the virtual filesystem described by file over the real file system\",\n    args: {\n      name: \"value\"\n    }\n  }, {\n    name: \"-iwithprefixbefore\",\n    description: \"Set directory to include search path with prefix\",\n    args: {\n      name: \"dir\",\n      template: \"folders\"\n    }\n  }, {\n    name: \"-iwithprefix\",\n    description: \"Set directory to SYSTEM include search path with prefix\",\n    args: {\n      name: \"dir\",\n      template: \"folders\"\n    }\n  }, {\n    name: \"-iwithsysroot\",\n    description: \"Add directory to SYSTEM include search path, absolute paths are 
relative to -isysroot\",\n args: {\n name: \"directory\",\n template: \"folders\"\n }\n }, {\n name: \"-I\",\n description: \"Add directory to include search path. If there are multiple -I options, these directories are searched in the order they are given before the standard system directories are searched. If the same directory is in the SYSTEM include search paths, for example if also specified with -isystem, the -I option will be ignored\",\n args: {\n name: \"dir\",\n template: \"folders\"\n }\n }, {\n name: \"--libomptarget-nvptx-path\",\n description: \"Path to libomptarget-nvptx libraries\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-L\",\n description: \"Add directory to library search path\",\n args: {\n name: \"dir\",\n template: \"folders\"\n }\n }, {\n name: \"-mabi\",\n args: {\n suggestions: [{\n name: \"vec-default\",\n description: \"Enable the default Altivec ABI on AIX (AIX only). Uses only volatile vector registers\"\n }, {\n name: \"vec-extabi\",\n description: \"Enable the extended Altivec ABI on AIX (AIX only). Uses volatile and nonvolatile vector registers\"\n }]\n },\n requiresSeparator: !0\n }, {\n name: \"-mabicalls\",\n description: \"Enable SVR4-style position-independent code (Mips only)\"\n }, {\n name: \"-maix-struct-return\",\n description: \"Return all structs in memory (PPC32 only)\"\n }, {\n name: \"-malign-branch-boundary\",\n description: \"Specify the boundary's size to align branches\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-malign-branch\",\n description: \"Specify types of branches to align\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-malign-double\",\n description: \"Align doubles to two words in structs (x86 only)\"\n }, {\n name: \"-mbackchain\",\n description: \"Link stack frames through backchain on System Z\"\n }, {\n name: \"-mbranch-protection\",\n description: \"Enforce targets of indirect branches and function returns\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-mbranches-within-32B-boundaries\",\n description: \"Align selected branches (fused, jcc, jmp) within 32-byte boundary\"\n }, {\n name: \"-mcmodel\",\n args: {\n suggestions: [\"small\", \"medium\", \"medany\", \"medlow\"]\n },\n requiresSeparator: !0\n }, {\n name: \"-mcmse\",\n description: \"Allow use of CMSE (Armv8-M Security Extensions)\"\n }, {\n name: \"-mcode-object-v3\",\n description: \"Legacy option to specify code object ABI V2 (-mnocode-object-v3) or V3 (-mcode-object-v3) (AMDGPU only)\"\n }, {\n name: \"-mcode-object-version\",\n description: \"Specify code object ABI version. Defaults to 3. (AMDGPU only)\",\n args: {\n name: \"version\"\n },\n requiresSeparator: !0\n }, {\n name: \"-mcrc\",\n description: \"Allow use of CRC instructions (ARM/Mips only)\"\n }, {\n name: \"-mcumode\",\n description: \"Specify CU (-mcumode) or WGP (-mno-cumode) wavefront execution mode (AMDGPU only)\"\n }, {\n name: \"-mdouble\",\n description: \"Force double to be 32 bits or 64 bits\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-MD\",\n description: \"Write a depfile containing user and system headers\"\n }, {\n name: \"-meabi\",\n description: \"Set EABI type, e.g. 
4, 5 or gnu (default depends on triple)\",\n    args: {\n      name: \"value\"\n    }\n  }, {\n    name: \"-membedded-data\",\n    description: \"Place constants in the .rodata section instead of the .sdata section even if they meet the -G threshold (MIPS)\"\n  }, {\n    name: \"-menable-experimental-extensions\",\n    description: \"Enable use of experimental RISC-V extensions\"\n  }, {\n    name: \"-menable-unsafe-fp-math\",\n    description: \"Allow unsafe floating-point math optimizations which may decrease precision\"\n  }, {\n    name: \"-mexec-model\",\n    description: \"Execution model (WebAssembly only)\",\n    args: {\n      name: \"value\"\n    },\n    requiresSeparator: !0\n  }, {\n    name: \"-mexecute-only\",\n    description: \"Disallow generation of data access to code sections (ARM only)\"\n  }, {\n    name: \"-mextern-sdata\",\n    description: \"Assume that externally defined data is in the small data if it meets the -G threshold (MIPS)\"\n  }, {\n    name: \"-mfentry\",\n    description: \"Insert calls to fentry at function entry (x86/SystemZ only)\"\n  }, {\n    name: \"-mfix-cortex-a53-835769\",\n    description: \"Workaround Cortex-A53 erratum 835769 (AArch64 only)\"\n  }, {\n    name: \"-mfp32\",\n    description: \"Use 32-bit floating point registers (MIPS only)\"\n  }, {\n    name: \"-mfp64\",\n    description: \"Use 64-bit floating point registers (MIPS only)\"\n  }, {\n    name: \"-MF\",\n    description: \"Write depfile output from -MMD, -MD, -MM, or -M to <file>\",\n    args: {\n      name: \"file\"\n    }\n  }, {\n    name: \"-mgeneral-regs-only\",\n    description: \"Generate code which only uses the general purpose registers (AArch64 only)\"\n  }, {\n    name: \"-mglobal-merge\",\n    description: \"Enable merging of globals\"\n  }, {\n    name: \"-mgpopt\",\n    description: \"Use GP relative accesses for symbols known to be in a small data section (MIPS)\"\n  }, {\n    name: \"-MG\",\n    description: \"Add missing headers to depfile\"\n  }, {\n    name: \"-mharden-sls\",\n    description: \"Select straight-line speculation hardening scope\",\n    args: {\n      name: \"value\"\n    },\n    requiresSeparator: !0\n  }, {\n    name: \"-mhvx-length\",\n    description: \"Set Hexagon Vector Length\",\n    args: {\n      name: \"value\"\n    },\n    requiresSeparator: !0\n  }, {\n    name: \"-mhvx\",\n    description: \"Enable Hexagon Vector eXtensions\",\n    args: {\n      name: \"value\",\n      isOptional: !0\n    },\n    requiresSeparator: !0\n  }, {\n    name: \"-miamcu\",\n    description: \"Use Intel MCU ABI\"\n  }, {\n    name: \"-mignore-xcoff-visibility\",\n    description: \"Not emit the visibility attribute for asm in AIX OS or give all symbols 'unspecified' visibility in XCOFF object file\"\n  }, {\n    name: \"--migrate\",\n    description: \"Run the migrator\"\n  }, {\n    name: \"-mincremental-linker-compatible\",\n    description: \"(integrated-as) Emit an object file which can be used with an incremental linker\"\n  }, {\n    name: \"-mindirect-jump\",\n    description: \"Change indirect jump instructions to inhibit speculation\",\n    args: {\n      name: \"value\"\n    },\n    requiresSeparator: !0\n  }, {\n    name: \"-mios-version-min\",\n    description: \"Set iOS deployment target\",\n    args: {\n      name: \"value\"\n    },\n    requiresSeparator: !0\n  }, {\n    name: \"-MJ\",\n    description: \"Write a compilation database entry per input\",\n    args: {\n      name: \"value\"\n    }\n  }, {\n    name: \"-mllvm\",\n    description: \"Additional arguments to forward to LLVM's option processing\",\n    args: {\n      name: \"value\"\n    }\n  }, {\n    name: \"-mlocal-sdata\",\n    description: \"Extend the -G behaviour to object local data (MIPS)\"\n  }, {\n    name: \"-mlong-calls\",\n    description: \"Generate branches with extended addressability, 
usually via indirect jumps\"\n }, {\n name: \"-mlong-double-128\",\n description: \"Force long double to be 128 bits\"\n }, {\n name: \"-mlong-double-64\",\n description: \"Force long double to be 64 bits\"\n }, {\n name: \"-mlong-double-80\",\n description: \"Force long double to be 80 bits, padded to 128 bits for storage\"\n }, {\n name: \"-mlvi-cfi\",\n description: \"Enable only control-flow mitigations for Load Value Injection (LVI)\"\n }, {\n name: \"-mlvi-hardening\",\n description: \"Enable all mitigations for Load Value Injection (LVI)\"\n }, {\n name: \"-mmacosx-version-min\",\n description: \"Set Mac OS X deployment target\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-mmadd4\",\n description: \"Enable the generation of 4-operand madd.s, madd.d and related instructions\"\n }, {\n name: \"-mmark-bti-property\",\n description: \"Add .note.gnu.property with BTI to assembly files (AArch64 only)\"\n }, {\n name: \"-MMD\",\n description: \"Write a depfile containing user headers\"\n }, {\n name: \"-mmemops\",\n description: \"Enable generation of memop instructions\"\n }, {\n name: \"-mms-bitfields\",\n description: \"Set the default structure layout to be compatible with the Microsoft compiler standard\"\n }, {\n name: \"-mmsa\",\n description: \"Enable MSA ASE (MIPS only)\"\n }, {\n name: \"-mmt\",\n description: \"Enable MT ASE (MIPS only)\"\n }, {\n name: \"-MM\",\n description: \"Like -MMD, but also implies -E and writes to stdout by default\"\n }, {\n name: \"-mno-abicalls\",\n description: \"Disable SVR4-style position-independent code (Mips only)\"\n }, {\n name: \"-mno-backchain\"\n }, {\n name: \"-mno-crc\",\n description: \"Disallow use of CRC instructions (Mips only)\"\n }, {\n name: \"-mno-embedded-data\",\n description: \"Do not place constants in the .rodata section instead of the .sdata if they meet the -G threshold (MIPS)\"\n }, {\n name: \"-mno-execute-only\",\n description: \"Allow generation of data access to code sections (ARM only)\"\n }, {\n name: \"-mno-extern-sdata\",\n description: \"Do not assume that externally defined data is in the small data if it meets the -G threshold (MIPS)\"\n }, {\n name: \"-mno-fix-cortex-a53-835769\",\n description: \"Don't workaround Cortex-A53 erratum 835769 (AArch64 only)\"\n }, {\n name: \"-mno-global-merge\",\n description: \"Disable merging of globals\"\n }, {\n name: \"-mno-gpopt\",\n description: \"Do not use GP relative accesses for symbols known to be in a small data section (MIPS)\"\n }, {\n name: \"-mno-hvx\",\n description: \"Disable Hexagon Vector eXtensions\"\n }, {\n name: \"-mno-implicit-float\",\n description: \"Don't generate implicit floating point instructions\"\n }, {\n name: \"-mno-incremental-linker-compatible\",\n description: \"(integrated-as) Emit an object file which cannot be used with an incremental linker\"\n }, {\n name: \"-mno-local-sdata\",\n description: \"Do not extend the -G behaviour to object local data (MIPS)\"\n }, {\n name: \"-mno-long-calls\",\n description: \"Restore the default behaviour of not generating long calls\"\n }, {\n name: \"-mno-lvi-cfi\",\n description: \"Disable control-flow mitigations for Load Value Injection (LVI)\"\n }, {\n name: \"-mno-lvi-hardening\",\n description: \"Disable mitigations for Load Value Injection (LVI)\"\n }, {\n name: \"-mno-madd4\",\n description: \"Disable the generation of 4-operand madd.s, madd.d and related instructions\"\n }, {\n name: \"-mno-memops\",\n description: \"Disable generation of memop instructions\"\n 
}, {\n name: \"-mno-movt\",\n description: \"Disallow use of movt/movw pairs (ARM only)\"\n }, {\n name: \"-mno-ms-bitfields\",\n description: \"Do not set the default structure layout to be compatible with the Microsoft compiler standard\"\n }, {\n name: \"-mno-msa\",\n description: \"Disable MSA ASE (MIPS only)\"\n }, {\n name: \"-mno-mt\",\n description: \"Disable MT ASE (MIPS only)\"\n }, {\n name: \"-mno-neg-immediates\",\n description: \"Disallow converting instructions with negative immediates to their negation or inversion\"\n }, {\n name: \"-mno-nvj\",\n description: \"Disable generation of new-value jumps\"\n }, {\n name: \"-mno-nvs\",\n description: \"Disable generation of new-value stores\"\n }, {\n name: \"-mno-outline-atomics\",\n description: \"Don't generate local calls to out-of-line atomic operations\"\n }, {\n name: \"-mno-outline\",\n description: \"Disable function outlining (AArch64 only)\"\n }, {\n name: \"-mno-packets\",\n description: \"Disable generation of instruction packets\"\n }, {\n name: \"-mno-pie-copy-relocations\"\n }, {\n name: \"-mno-relax\",\n description: \"Disable linker relaxation\"\n }, {\n name: \"-mno-restrict-it\",\n description: \"Allow generation of deprecated IT blocks for ARMv8. It is off by default for ARMv8 Thumb mode\"\n }, {\n name: \"-mno-save-restore\",\n description: \"Disable using library calls for save and restore\"\n }, {\n name: \"-mno-seses\",\n description: \"Disable speculative execution side effect suppression (SESES)\"\n }, {\n name: \"-mno-speculative-load-hardening\"\n }, {\n name: \"-mno-stack-arg-probe\",\n description: \"Disable stack probes which are enabled by default\"\n }, {\n name: \"-mno-tls-direct-seg-refs\",\n description: \"Disable direct TLS access through segment registers\"\n }, {\n name: \"-mno-unaligned-access\",\n description: \"Force all memory accesses to be aligned (AArch32/AArch64 only)\"\n }, {\n name: \"-mno-wavefrontsize64\",\n description: \"Specify wavefront size 32 mode (AMDGPU only)\"\n }, {\n name: \"-mnocrc\",\n description: \"Disallow use of CRC instructions (ARM only)\"\n }, {\n name: \"-mnop-mcount\",\n description: \"Generate mcount/__fentry__ calls as nops. To activate they need to be patched in\"\n }, {\n name: \"-mnvj\",\n description: \"Enable generation of new-value jumps\"\n }, {\n name: \"-mnvs\",\n description: \"Enable generation of new-value stores\"\n }, {\n name: \"-module-dependency-dir\",\n description: \"Directory to dump module dependencies to\",\n args: {\n name: \"value\"\n }\n }, {\n name: \"-module-file-info\",\n description: \"Provide information about a particular module file\"\n }, {\n name: \"-momit-leaf-frame-pointer\",\n description: \"Omit frame pointer setup for leaf functions\"\n }, {\n name: \"-moutline-atomics\",\n description: \"Generate local calls to out-of-line atomic operations\"\n }, {\n name: \"-moutline\",\n description: \"Enable function outlining (AArch64 only)\"\n }, {\n name: \"-mpacked-stack\",\n description: \"Use packed stack layout (SystemZ only)\"\n }, {\n name: \"-mpackets\",\n description: \"Enable generation of instruction packets\"\n }, {\n name: \"-mpad-max-prefix-size\",\n description: \"Specify maximum number of prefixes to use for padding\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-mpie-copy-relocations\",\n description: \"Use copy relocations support for PIE builds\"\n }, {\n name: \"-mprefer-vector-width\",\n description: \"Specifies preferred vector width for auto-vectorization. 
Defaults to 'none' which allows target specific decisions\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-MP\",\n description: \"Create phony target for each dependency (other than main file)\"\n }, {\n name: \"-mqdsp6-compat\",\n description: \"Enable hexagon-qdsp6 backward compatibility\"\n }, {\n name: \"-MQ\",\n description: \"Specify name of main file output to quote in depfile\",\n args: {\n name: \"value\"\n }\n }, {\n name: \"-mrecord-mcount\",\n description: \"Generate a __mcount_loc section entry for each __fentry__ call\"\n }, {\n name: \"-mrelax-all\",\n description: \"(integrated-as) Relax all machine instructions\"\n }, {\n name: \"-mrelax\",\n description: \"Enable linker relaxation\"\n }, {\n name: \"-mrestrict-it\",\n description: \"Disallow generation of deprecated IT blocks for ARMv8. It is on by default for ARMv8 Thumb mode\"\n }, {\n name: \"-mrtd\",\n description: \"Make StdCall calling convention the default\"\n }, {\n name: \"-msave-restore\",\n description: \"Enable using library calls for save and restore\"\n }, {\n name: \"-mseses\",\n description: \"Enable speculative execution side effect suppression (SESES). Includes LVI control flow integrity mitigations\"\n }, {\n name: \"-msign-return-address\",\n description: \"Select return address signing scope\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-msmall-data-limit\",\n description: \"Put global and static data smaller than the limit into a special section\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-msoft-float\",\n description: \"Use software floating point\"\n }, {\n name: \"-mspeculative-load-hardening\"\n }, {\n name: \"-mstack-alignment\",\n description: \"Set the stack alignment\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-mstack-arg-probe\",\n description: \"Enable stack probes\"\n }, {\n name: \"-mstack-probe-size\",\n description: \"Set the stack probe size\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-mstack-protector-guard-offset\",\n description: \"Use the given offset for addressing the stack-protector guard\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-mstack-protector-guard-reg\",\n description: \"Use the given reg for addressing the stack-protector guard\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-mstack-protector-guard\",\n description: \"Use the given guard (global, tls) for addressing the stack-protector guard\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-mstackrealign\",\n description: \"Force realign the stack at entry to every function\"\n }, {\n name: \"-msve-vector-bits\",\n description: 'Specify the size in bits of an SVE vector register. Defaults to the vector length agnostic value of \"scalable\". (AArch64 only)',\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-msvr4-struct-return\",\n description: \"Return small structs in registers (PPC32 only)\"\n }, {\n name: \"-mthread-model\",\n description: \"The thread model to use, e.g. 
posix, single (posix by default)\",\n args: {\n name: \"value\"\n }\n }, {\n name: \"-mtls-direct-seg-refs\",\n description: \"Enable direct TLS access through segment registers (default)\"\n }, {\n name: \"-mtls-size\",\n description: \"Specify bit size of immediate TLS offsets (AArch64 ELF only): 12 (for 4KB) | 24 (for 16MB, default) | 32 (for 4GB) | 48 (for 256TB, needs -mcmodel=large)\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-mtp\",\n description: \"Thread pointer access method (AArch32/AArch64 only)\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-mtune\",\n description: \"Only supported on X86 and RISC-V. Otherwise accepted for compatibility with GCC\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"-MT\",\n description: \"Specify name of main file output in depfile\",\n args: {\n name: \"value\"\n }\n }, {\n name: \"-munaligned-access\",\n description: \"Allow memory accesses to be unaligned (AArch32/AArch64 only)\"\n }, {\n name: \"-munsafe-fp-atomics\",\n description: \"Enable unsafe floating point atomic instructions (AMDGPU only)\"\n }, {\n name: \"-MV\",\n description: \"Use NMake/Jom format for the depfile\"\n }, {\n name: \"-mwavefrontsize64\",\n description: \"Specify wavefront size 64 mode (AMDGPU only)\"\n }, {\n name: \"-M\",\n description: \"Like -MD, but also implies -E and writes to stdout by default\"\n }, {\n name: \"--no-cuda-include-ptx\",\n description: \"Do not include PTX for the following GPU architecture (e.g. sm_35) or 'all'. May be specified more than once\",\n args: {\n name: \"value\"\n },\n requiresSeparator: !0\n }, {\n name: \"--no-cuda-version-check\",\n description: \"Don't error out if the detected version of the CUDA install is too low for the requested CUDA gpu architecture\"\n }, {\n name: \"--no-offload-arch\",\n description: \"Remove CUDA/HIP offloading device architecture (e.g. sm_35, gfx906) from the list of devices to compile for. 
'all' resets the list to its default value\",\n    args: {\n      name: \"value\"\n    },\n    requiresSeparator: !0\n  }, {\n    name: \"-no-pthread\"\n  }, {\n    name: \"--no-system-header-prefix\",\n    description: \"Treat all #include paths starting with <prefix> as not including a system header\",\n    args: {\n      name: \"prefix\"\n    },\n    requiresSeparator: !0\n  }, {\n    name: \"-nobuiltininc\",\n    description: \"Disable builtin #include directories\"\n  }, {\n    name: \"-nogpuinc\",\n    description: \"Do not add include paths for CUDA/HIP and do not include the default CUDA/HIP wrapper headers\"\n  }, {\n    name: \"-nogpulib\",\n    description: \"Do not link device library for CUDA/HIP device compilation\"\n  }, {\n    name: \"-nostdinc++\",\n    description: \"Disable standard #include directories for the C++ standard library\"\n  }, {\n    name: \"-ObjC++\",\n    description: \"Treat source input files as Objective-C++ inputs\"\n  }, {\n    name: \"-objcmt-atomic-property\",\n    description: \"Make migration to 'atomic' properties\"\n  }, {\n    name: \"-objcmt-migrate-all\",\n    description: \"Enable migration to modern ObjC\"\n  }, {\n    name: \"-objcmt-migrate-annotation\",\n    description: \"Enable migration to property and method annotations\"\n  }, {\n    name: \"-objcmt-migrate-designated-init\",\n    description: \"Enable migration to infer NS_DESIGNATED_INITIALIZER for initializer methods\"\n  }, {\n    name: \"-objcmt-migrate-instancetype\",\n    description: \"Enable migration to infer instancetype for method result type\"\n  }, {\n    name: \"-objcmt-migrate-literals\",\n    description: \"Enable migration to modern ObjC literals\"\n  }, {\n    name: \"-objcmt-migrate-ns-macros\",\n    description: \"Enable migration to NS_ENUM/NS_OPTIONS macros\"\n  }, {\n    name: \"-objcmt-migrate-property-dot-syntax\",\n    description: \"Enable migration of setter/getter messages to property-dot syntax\"\n  }, {\n    name: \"-objcmt-migrate-property\",\n    description: \"Enable migration to modern ObjC property\"\n  }, {\n    name: \"-objcmt-migrate-protocol-conformance\",\n    description: \"Enable migration to add protocol conformance on classes\"\n  }, {\n    name: \"-objcmt-migrate-readonly-property\",\n    description: \"Enable migration to modern ObjC readonly property\"\n  }, {\n    name: \"-objcmt-migrate-readwrite-property\",\n    description: \"Enable migration to modern ObjC readwrite property\"\n  }, {\n    name: \"-objcmt-migrate-subscripting\",\n    description: \"Enable migration to modern ObjC subscripting\"\n  }, {\n    name: \"-objcmt-ns-nonatomic-iosonly\",\n    description: \"Enable migration to use NS_NONATOMIC_IOSONLY macro for setting property's 'atomic' attribute\"\n  }, {\n    name: \"-objcmt-returns-innerpointer-property\",\n    description: \"Enable migration to annotate property with NS_RETURNS_INNER_POINTER\"\n  }, {\n    name: \"-objcmt-whitelist-dir-path\",\n    description: \"Only modify files with a filename contained in the provided directory path\",\n    args: {\n      name: \"value\"\n    },\n    requiresSeparator: !0\n  }, {\n    name: \"-ObjC\",\n    description: \"Treat source input files as Objective-C inputs\"\n  }, {\n    name: \"--offload-arch\",\n    description: \"CUDA offloading device architecture (e.g. sm_35), or HIP offloading target ID in the form of a device architecture followed by target ID features delimited by a colon. Each target ID feature is a pre-defined string followed by a plus or minus sign (e.g. gfx908:xnack+:sramecc-). 
May be specified more than once\",\n    args: {\n      name: \"value\"\n    },\n    requiresSeparator: !0\n  }, {\n    name: \"-o\",\n    description: \"Write output to <file>\",\n    args: {\n      name: \"file\"\n    }\n  }, {\n    name: \"-pg\",\n    description: \"Enable mcount instrumentation\"\n  }, {\n    name: \"-pipe\",\n    description: \"Use pipes between commands, when possible\"\n  }, {\n    name: \"--precompile\",\n    description: \"Only precompile the input\"\n  }, {\n    name: \"-print-effective-triple\",\n    description: \"Print the effective target triple\"\n  }, {\n    name: \"-print-file-name\",\n    description: \"Print the full library path of <file>\",\n    args: {\n      name: \"file\"\n    },\n    requiresSeparator: !0\n  }, {\n    name: \"-print-ivar-layout\",\n    description: \"Enable Objective-C Ivar layout bitmap print trace\"\n  }, {\n    name: \"-print-libgcc-file-name\",\n    description: 'Print the library path for the currently used compiler runtime library (\"libgcc.a\" or \"libclang_rt.builtins.*.a\")'\n  }, {\n    name: \"-print-prog-name\",\n    description: \"Print the full program path of <name>\",\n    args: {\n      name: \"name\"\n    },\n    requiresSeparator: !0\n  }, {\n    name: \"-print-resource-dir\",\n    description: \"Print the resource directory pathname\"\n  }, {\n    name: \"-print-search-dirs\",\n    description: \"Print the paths used for finding libraries and programs\"\n  }, {\n    name: \"-print-supported-cpus\",\n    description: \"Print supported cpu models for the given target (if target is not specified, it will print the supported cpus for the default target)\"\n  }, {\n    name: \"-print-target-triple\",\n    description: \"Print the normalized target triple\"\n  }, {\n    name: \"-print-targets\",\n    description: \"Print the registered targets\"\n  }, {\n    name: \"-pthread\",\n    description: \"Support POSIX threads in generated code\"\n  }, {\n    name: \"--ptxas-path\",\n    description: \"Path to ptxas (used for compiling CUDA code)\",\n    args: {\n      name: \"value\"\n    },\n    requiresSeparator: !0\n  }, {\n    name: \"-P\",\n    description: \"Disable linemarker output in -E mode\"\n  }, {\n    name: \"-Qn\",\n    description: \"Do not emit metadata containing compiler name and version\"\n  }, {\n    name: \"-Qunused-arguments\",\n    description: \"Don't emit warning for unused driver arguments\"\n  }, {\n    name: \"-Qy\",\n    description: \"Emit metadata containing compiler name and version\"\n  }, {\n    name: \"-relocatable-pch\",\n    description: \"Whether to build a relocatable precompiled header\"\n  }, {\n    name: \"-rewrite-legacy-objc\",\n    description: \"Rewrite Legacy Objective-C source to C++\"\n  }, {\n    name: \"-rewrite-objc\",\n    description: \"Rewrite Objective-C source to C++\"\n  }, {\n    name: \"--rocm-device-lib-path\",\n    description: \"ROCm device library path. 
Alternative to rocm-path\",\n    args: {\n      name: \"value\"\n    },\n    requiresSeparator: !0\n  }, {\n    name: \"--rocm-path\",\n    description: \"ROCm installation path, used for finding and automatically linking required bitcode libraries\",\n    args: {\n      name: \"value\"\n    },\n    requiresSeparator: !0\n  }, {\n    name: \"-Rpass-analysis\",\n    description: \"Report transformation analysis from optimization passes whose name matches the given POSIX regular expression\",\n    args: {\n      name: \"value\"\n    },\n    requiresSeparator: !0\n  }, {\n    name: \"-Rpass-missed\",\n    description: \"Report missed transformations by optimization passes whose name matches the given POSIX regular expression\",\n    args: {\n      name: \"value\"\n    },\n    requiresSeparator: !0\n  }, {\n    name: \"-Rpass\",\n    description: \"Report transformations performed by optimization passes whose name matches the given POSIX regular expression\",\n    args: {\n      name: \"value\"\n    },\n    requiresSeparator: !0\n  }, {\n    name: \"-rtlib\",\n    description: \"Compiler runtime library to use\",\n    args: {\n      name: \"value\"\n    },\n    requiresSeparator: !0\n  }, {\n    name: \"-Rremark\",\n    description: \"Enable the specified remark\",\n    args: {\n      name: \"<remark>\"\n    }\n  }, {\n    name: \"-save-stats\",\n    description: \"Save llvm statistics\",\n    args: {\n      name: \"value\",\n      isOptional: !0\n    },\n    requiresSeparator: !0\n  }, {\n    name: \"-save-temps\",\n    description: \"Save intermediate compilation results\",\n    args: {\n      name: \"value\",\n      isOptional: !0\n    },\n    requiresSeparator: !0\n  }, {\n    name: \"-serialize-diagnostics\",\n    description: \"Serialize compiler diagnostics to a file\",\n    args: {\n      name: \"value\"\n    }\n  }, {\n    name: \"-shared-libsan\",\n    description: \"Dynamically link the sanitizer runtime\"\n  }, {\n    name: \"-static-libsan\",\n    description: \"Statically link the sanitizer runtime\"\n  }, {\n    name: \"-static-openmp\",\n    description: \"Use the static host OpenMP runtime while linking\"\n  }, {\n    name: \"-stdlib++-isystem\",\n    description: \"Use directory as the C++ standard library include path\",\n    args: {\n      name: \"directory\",\n      template: \"folders\"\n    }\n  }, {\n    name: \"-stdlib\",\n    description: \"C++ standard library to use\",\n    args: {\n      name: \"value\"\n    },\n    requiresSeparator: !0\n  }, {\n    name: \"-sycl-std\",\n    description: \"SYCL language standard to compile for\",\n    args: {\n      name: \"value\"\n    },\n    requiresSeparator: !0\n  }, {\n    name: \"--system-header-prefix\",\n    description: \"Treat all #include paths starting with <prefix> as including a system header\",\n    args: {\n      name: \"prefix\"\n    },\n    requiresSeparator: !0\n  }, {\n    name: \"-S\",\n    description: \"Only run preprocess and compilation steps\"\n  }, {\n    name: \"-target-variant\",\n    description: \"Generate code that may run on a particular variant of the deployment target\",\n    args: {\n      name: \"value\"\n    }\n  }, {\n    name: \"--target\",\n    description: \"Generate code for the given target\",\n    args: {\n      name: \"value\"\n    },\n    requiresSeparator: !0\n  }, {\n    name: \"-Tbss\",\n    description: \"Set starting address of BSS to <addr>\",\n    args: {\n      name: \"addr\"\n    }\n  }, {\n    name: \"-Tdata\",\n    description: \"Set starting address of DATA to <addr>\",\n    args: {\n      name: \"addr\"\n    }\n  }, {\n    name: \"-time\",\n    description: \"Time individual commands\"\n  }, {\n    name: \"-traditional-cpp\",\n    description: \"Enable some traditional CPP emulation\"\n  }, {\n    name: \"-trigraphs\",\n    description: \"Process trigraph sequences\"\n  }, {\n    name: \"-Ttext\",\n    description: \"Set starting address of TEXT to <addr>\",\n    args: {\n      name: \"addr\"\n    }\n  }, {\n    name: \"-T\",\n    
description: \"Specify