diff --git a/compiler-rs/clients_schema/src/lib.rs b/compiler-rs/clients_schema/src/lib.rs
index d8f2d16f3c..cf825965f0 100644
--- a/compiler-rs/clients_schema/src/lib.rs
+++ b/compiler-rs/clients_schema/src/lib.rs
@@ -482,6 +482,25 @@ impl TypeDefinition {
     }
 }
 
+/**
+ * The Example type is used for both requests and responses.
+ * This type definition is taken from the OpenAPI spec
+ * https://spec.openapis.org/oas/v3.1.0#example-object
+ * with the exception of using String as the 'value' type.
+ * This type matches the 'Example' type in metamodel.ts; the
+ * data serialized by the TypeScript code into schema.json
+ * needs to be deserialized into this equivalent type.
+ * The OpenAPI v3 spec also defines an 'Example' type, so to
+ * distinguish the two, this type is called SchemaExample.
+ */
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct SchemaExample {
+    pub summary: Option<String>,
+    pub description: Option<String>,
+    pub value: Option<String>,
+    pub external_value: Option<String>,
+}
+
 /// Common attributes for all type definitions
 #[derive(Debug, Clone, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
@@ -675,6 +694,8 @@ pub struct Request {
 
     #[serde(default, skip_serializing_if = "Vec::is_empty")]
     pub attached_behaviors: Vec<String>,
+
+    pub examples: Option<IndexMap<String, SchemaExample>>
 }
 
 impl WithBaseType for Request {
@@ -703,6 +724,8 @@ pub struct Response {
 
     #[serde(default, skip_serializing_if = "Vec::is_empty")]
     pub exceptions: Vec<ResponseException>,
+
+    pub examples: Option<IndexMap<String, SchemaExample>>
 }
 
 impl WithBaseType for Response {
diff --git a/compiler-rs/clients_schema_to_openapi/src/paths.rs b/compiler-rs/clients_schema_to_openapi/src/paths.rs
index ed7e145ce9..62fd16dc98 100644
--- a/compiler-rs/clients_schema_to_openapi/src/paths.rs
+++ b/compiler-rs/clients_schema_to_openapi/src/paths.rs
@@ -20,12 +20,15 @@ use std::fmt::Write;
 
 use anyhow::{anyhow, bail};
 use clients_schema::Property;
+use indexmap::IndexMap;
 use indexmap::indexmap;
 use icu_segmenter::SentenceSegmenter;
 use openapiv3::{
     MediaType, Parameter, ParameterData, ParameterSchemaOrContent, PathItem, PathStyle, Paths, QueryStyle, ReferenceOr,
-    RequestBody, Response, Responses, StatusCode,
+    RequestBody, Response, Responses, StatusCode, Example
 };
+use clients_schema::SchemaExample;
+use serde_json::json;
 
 use crate::components::TypesAndComponents;
 
@@ -116,15 +119,42 @@ pub fn add_endpoint(
 
     //---- Prepare request body
 
+    // This function converts the IndexMap<String, SchemaExample> examples of
+    // schema.json into the IndexMap<String, ReferenceOr<Example>> format that
+    // OpenAPI expects.
+    fn get_openapi_examples(schema_examples: IndexMap<String, SchemaExample>) -> IndexMap<String, ReferenceOr<Example>> {
+        let mut openapi_examples = indexmap! {};
+        for (name, schema_example) in schema_examples {
+            let openapi_example = Example {
+                value: Some(json!(schema_example.value)),
+                description: schema_example.description.clone(),
+                summary: schema_example.summary.clone(),
+                external_value: None,
+                extensions: Default::default(),
+            };
+            openapi_examples.insert(name.clone(), ReferenceOr::Item(openapi_example));
+        }
+        openapi_examples
+    }
+
+    let mut request_examples: IndexMap<String, ReferenceOr<Example>> = indexmap! {};
+    // If this endpoint request has examples in schema.json, convert them to the
+    // OpenAPI format and add them to the endpoint request in the OpenAPI document.
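+    // For illustration (hypothetical name and value): a schema.json entry such as
+    // "examples": { "MyExample1": { "summary": "...", "value": "{ \"acknowledged\": true }" } }
+    // becomes an OpenAPI `examples` member keyed by "MyExample1", with the JSON
+    // string carried over as the example's `value`.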
+    if request.examples.is_some() {
+        request_examples = get_openapi_examples(request.examples.as_ref().unwrap().clone());
+    }
+
     let request_body = tac.convert_request(request)?.map(|schema| {
         let media = MediaType {
             schema: Some(schema),
             example: None,
-            examples: Default::default(),
+            examples: request_examples,
             encoding: Default::default(),
             extensions: Default::default(),
         };
+
         let body = RequestBody {
             description: None,
             // FIXME: nd-json requests
@@ -142,9 +172,16 @@ pub fn add_endpoint(
 
     //---- Prepare request responses
 
+    // FIXME: buggy for responses with no body
     // TODO: handle binary responses
     let response_def = tac.model.get_response(endpoint.response.as_ref().unwrap())?;
+    let mut response_examples: IndexMap<String, ReferenceOr<Example>> = indexmap! {};
+    // If this endpoint response has examples in schema.json, convert them to the
+    // OpenAPI format and add them to the endpoint response in the OpenAPI document.
+    if response_def.examples.is_some() {
+        response_examples = get_openapi_examples(response_def.examples.as_ref().unwrap().clone());
+    }
     let response = Response {
         description: "".to_string(),
         headers: Default::default(),
@@ -152,7 +189,7 @@ pub fn add_endpoint(
             "application/json".to_string() => MediaType {
                 schema: tac.convert_response(response_def)?,
                 example: None,
-                examples: Default::default(),
+                examples: response_examples,
                 encoding: Default::default(),
                 extensions: Default::default(),
             }
diff --git a/compiler-rs/compiler-wasm-lib/pkg/compiler_wasm_lib_bg.wasm b/compiler-rs/compiler-wasm-lib/pkg/compiler_wasm_lib_bg.wasm
index 1a8bba3604..7eddf69b79 100644
Binary files a/compiler-rs/compiler-wasm-lib/pkg/compiler_wasm_lib_bg.wasm and b/compiler-rs/compiler-wasm-lib/pkg/compiler_wasm_lib_bg.wasm differ
diff --git a/compiler/src/index.ts b/compiler/src/index.ts
index cdf6827bfb..b1977567c7 100644
--- a/compiler/src/index.ts
+++ b/compiler/src/index.ts
@@ -28,6 +28,7 @@ import validateModel from './steps/validate-model'
 import addContentType from './steps/add-content-type'
 import readDefinitionValidation from './steps/read-definition-validation'
 import addDeprecation from './steps/add-deprecation'
+import ExamplesProcessor from './steps/add-examples'
 
 const nvmrc = readFileSync(join(__dirname, '..', '..', '.nvmrc'), 'utf8')
 const nodejsMajor = process.version.split('.').shift()?.slice(1) ?? ''
@@ -65,6 +66,9 @@ if (outputFolder === '' || outputFolder === undefined) {
 
 const compiler = new Compiler(specsFolder, outputFolder)
 
+const examplesProcessor = new ExamplesProcessor(specsFolder)
+const addExamples = examplesProcessor.addExamples.bind(examplesProcessor)
+
 compiler
   .generateModel()
   .step(addInfo)
@@ -74,6 +78,7 @@ compiler
   .step(validateRestSpec)
   .step(addDescription)
   .step(validateModel)
+  .step(addExamples)
   .write()
   .then(() => {
     console.log('Done')
diff --git a/compiler/src/model/metamodel.ts b/compiler/src/model/metamodel.ts
index 9f01d7a956..37a0de06ed 100644
--- a/compiler/src/model/metamodel.ts
+++ b/compiler/src/model/metamodel.ts
@@ -260,6 +260,19 @@ export class Interface extends BaseType {
   variants?: Container
 }
 
+/**
+ * The Example type is used for both requests and responses.
+ * This type definition is taken from the OpenAPI spec
+ * https://spec.openapis.org/oas/v3.1.0#example-object
+ * with the exception of using string as the 'value' type.
+ */
+export class Example {
+  summary?: string
+  description?: string
+  value?: string
+  external_value?: string
+}
+
 /**
  * A request type
  */
@@ -288,6 +301,7 @@ export class Request extends BaseType {
   body: Body
   behaviors?: Behavior[]
   attachedBehaviors?: string[]
+  examples?: Map<string, Example>
 }
 
 /**
@@ -300,6 +314,7 @@ export class Response extends BaseType {
   behaviors?: Behavior[]
   attachedBehaviors?: string[]
   exceptions?: ResponseException[]
+  examples?: Map<string, Example>
 }
 
 export class ResponseException {
diff --git a/compiler/src/steps/add-examples.ts b/compiler/src/steps/add-examples.ts
new file mode 100644
index 0000000000..592a1f61e0
--- /dev/null
+++ b/compiler/src/steps/add-examples.ts
@@ -0,0 +1,261 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import * as model from '../model/metamodel'
+import { JsonSpec } from '../model/json-spec'
+import * as path from 'path'
+import * as fs from 'fs'
+import * as yaml from 'js-yaml'
+
+/**
+ * Scan the API folders in the specification to locate examples
+ * for all the API endpoints. Then add the examples to the model.
+ */
+export default class ExamplesProcessor {
+  specsFolder: string
+
+  constructor (specsFolder: string) {
+    this.specsFolder = specsFolder
+  }
+
+  // Add request and response examples for all the endpoints in the model.
+  // Note that 'jsonSpec' is a parameter that is passed to a 'Step'.
+  // We don't need that parameter for the 'addExamples' functionality.
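+  // Illustrative layout (hypothetical API folder): for a request type whose
+  // specLocation is indices/create/IndicesCreateRequest.ts, request examples
+  // are read from indices/create/examples/request/*.yml and response examples
+  // from indices/create/examples/response/*.yml (or {nnn}_response/*.yml).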
+  async addExamples (model: model.Model, jsonSpec: Map<string, JsonSpec>): Promise<model.Model> {
+    const requestExamplesProcessor = new RequestExamplesProcessor(model, this.specsFolder)
+    const responseExamplesProcessor = new ResponseExamplesProcessor(model, this.specsFolder)
+    for (const endpoint of model.endpoints) {
+      if (endpoint.request != null) { requestExamplesProcessor.addExamples(endpoint.request) }
+      if (endpoint.response != null) { responseExamplesProcessor.addExamples(endpoint.response) }
+    }
+    return model
+  }
+}
+
+/*
+ * Base class for the request and response examples processors.
+ */
+class BaseExamplesProcessor {
+  model: model.Model
+  specsFolder: string
+
+  constructor (model: model.Model, specsFolder: string) {
+    this.model = model
+    this.specsFolder = specsFolder
+  }
+
+  // Log a 'warning' message.
+  warning (message: string): void {
+    console.warn('=== [ExamplesProcessor]: ' + message)
+  }
+
+  // Get all the subfolders in a folder.
+  getSubfolders (folderPath: string): string[] {
+    const entries = fs.readdirSync(folderPath, { withFileTypes: true })
+    const folders = entries
+      .filter(entry => entry.isDirectory())
+      .map(entry => entry.name)
+    return folders
+  }
+
+  // Get all the files in a folder.
+  getFilesInFolder (folderPath: string): string[] {
+    const entries = fs.readdirSync(folderPath, { withFileTypes: true })
+    const files = entries
+      .filter(entry => entry.isFile())
+      .map(entry => entry.name)
+    return files
+  }
+
+  // Check if a path exists and is a directory.
+  isDirectory (path: string): boolean {
+    try {
+      const stats = fs.statSync(path)
+      return stats.isDirectory()
+    } catch (error) {
+      if (error.code === 'ENOENT') {
+        // Path does not exist
+        return false
+      } else {
+        // Other error, rethrow
+        throw error
+      }
+    }
+  }
+
+  // Given the spec location of a request or response,
+  // return the path to the examples folder for that
+  // request or response.
+  getExamplesFolder (specLocation: string): string | undefined {
+    const specDir = path.dirname(specLocation)
+    const specPath = path.join(this.specsFolder, specDir)
+    const examplesFolder = path.join(specPath, 'examples')
+    if (this.isDirectory(examplesFolder)) {
+      return examplesFolder
+    }
+    return undefined
+  }
+
+  // Given an examples request or response folder, return all the
+  // valid example files in that folder.
+  getExampleFiles (folder: string): string[] {
+    if (!this.isDirectory(folder)) {
+      return []
+    }
+    // Currently we only allow YAML example files.
+    const exampleFiles = this.getFilesInFolder(folder)
+      .filter(file => file.endsWith('.yml') || file.endsWith('.yaml'))
+    if (exampleFiles.length === 0) {
+      this.warning(`No example files found in ${folder}`)
+      return []
+    }
+    return exampleFiles
+  }
+
+  // Look up all the example files in a folder. Use the filename without extension
+  // as the name of the example, and the YAML content as the example value.
+  // Return a map of example names to example values.
+  getExampleMap (folder: string): Map<string, model.Example> {
+    const exampleFiles = this.getExampleFiles(folder)
+    const examples = new Map<string, model.Example>()
+    for (const fileName of exampleFiles) {
+      const filePath = path.join(folder, fileName)
+      const exampleFileContent = fs.readFileSync(filePath, 'utf8')
+      const exampleName = path.basename(fileName, path.extname(fileName))
+      const example: model.Example = yaml.load(exampleFileContent)
+      // Some of the example files set their 'value' as a JSON string,
+      // and some files set their 'value' as an object. For consistency,
+      // if the value is not a JSON string, convert it to a JSON string.
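+      // For instance, a hypothetical examples/request/MyExample1.yml of:
+      //   summary: A sample request
+      //   value:
+      //     settings:
+      //       number_of_shards: 1
+      // parses with 'value' as an object, which is re-serialized below as the
+      // JSON string "{\n  \"settings\": {\n    \"number_of_shards\": 1\n  }\n}".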
+      if (typeof example.value !== 'string') {
+        // Convert to prettified JSON string
+        example.value = JSON.stringify(example.value, null, 2)
+      }
+      examples[exampleName] = example
+    }
+    return examples
+  }
+}
+
+/*
+ * Class to add the examples for an API request
+ */
+class RequestExamplesProcessor extends BaseExamplesProcessor {
+  // Traverse all the types in the model to find a type that is
+  // of type 'request' and has the same name and namespace as the request.
+  getRequestDefinition (model: model.Model, request: model.TypeName): model.Request {
+    for (const type of model.types) {
+      if (type.kind === 'request') {
+        if (type.name.name === request.name && type.name.namespace === request.namespace) {
+          return type
+        }
+      }
+    }
+    throw new Error(`Can't find the request definition for ${request.namespace}.${request.name}`)
+  }
+
+  // Given the spec location, return the request examples folder, if it exists.
+  getExamplesRequestSubfolder (examplesSubfolder: string): string | undefined {
+    const subFolder = path.join(examplesSubfolder, 'request')
+    if (this.isDirectory(subFolder)) {
+      return subFolder
+    }
+    return undefined
+  }
+
+  // Find all the request examples for this request and add them to the model.
+  addExamples (request: model.TypeName): void {
+    const requestDefinition = this.getRequestDefinition(this.model, request)
+    const examplesFolder = this.getExamplesFolder(requestDefinition.specLocation)
+    if (examplesFolder === undefined) {
+      return
+    }
+    // Get the request examples subfolder.
+    const examplesRequestSubfolder = this.getExamplesRequestSubfolder(examplesFolder)
+    // If there is an examples/request folder, add the request examples to the model.
+    if (examplesRequestSubfolder !== undefined) {
+      requestDefinition.examples = this.getExampleMap(examplesRequestSubfolder)
+    }
+  }
+}
+
+/*
+ * Class to add the examples for an API response
+ */
+class ResponseExamplesProcessor extends BaseExamplesProcessor {
+  // Traverse all the types in the model to find a type that is
+  // of type 'response' and has the same name and namespace as the response.
+  getResponseDefinition (model: model.Model, response: model.TypeName): model.Response {
+    for (const type of model.types) {
+      if (type.kind === 'response') {
+        if (type.name.name === response.name && type.name.namespace === response.namespace) {
+          return type
+        }
+      }
+    }
+    throw new Error(`Can't find the response definition for ${response.namespace}.${response.name}`)
+  }
+
+  // Given the spec location, return the response example folders if they exist.
+  // A response example folder can be of either of these forms:
+  //    response
+  //    {nnn}_response
+  // Where {nnn} is the HTTP response code. If the folder is named 'response',
+  // assume that the response code is 200, otherwise pick up the response code
+  // from the folder name.
+  // Return a map of status code to the folder path.
+  getExamplesResponseSubfolderMap (examplesSubfolder: string): Map<string, string> | undefined {
+    const subfolders = this.getSubfolders(examplesSubfolder)
+    // If we have a "response" subfolder, stop there and return.
+    // We should not have a mix of response and {nnn}_response folders.
+    if (subfolders.includes('response')) {
+      const responseSubfolder = path.join(examplesSubfolder, 'response')
+      return new Map([['200', responseSubfolder]])
+    }
+    // Look for subfolders of the format '{nnn}_response'.
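+    // e.g. '200_response' or '404_response'; the three-digit status code is
+    // captured from the folder name and used as the key in the returned map.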
+    const rspSubfolders = subfolders.filter(folder => folder.endsWith('_response'))
+    const responseTypeMap = new Map<string, string>()
+    for (const rspSubfolder of rspSubfolders) {
+      const match = rspSubfolder.match(/^([0-9]{3})_response$/)
+      if (match == null) {
+        throw new Error(`Unexpected response folder: ${rspSubfolder}`)
+      }
+      const statusCode = match[1]
+      const responseSubfolder = path.join(examplesSubfolder, rspSubfolder)
+      responseTypeMap.set(statusCode, responseSubfolder)
+    }
+    return responseTypeMap
+  }
+
+  // Find all the response examples for this response and add them to the model.
+  addExamples (response: model.TypeName): void {
+    const responseDefinition = this.getResponseDefinition(this.model, response)
+    const examplesFolder = this.getExamplesFolder(responseDefinition.specLocation)
+    if (examplesFolder === undefined) {
+      return
+    }
+    // Get a map of status code to response example subfolder.
+    const examplesResponseSubfolderMap = this.getExamplesResponseSubfolderMap(examplesFolder)
+    const examples200ResponseSubfolder = examplesResponseSubfolderMap?.get('200')
+    // If there is an examples/response or examples/200_response folder,
+    // add the response examples to the model.
+    if (examples200ResponseSubfolder !== undefined) {
+      responseDefinition.examples = this.getExampleMap(examples200ResponseSubfolder)
+    }
+  }
+}
diff --git a/output/openapi/elasticsearch-openapi.json b/output/openapi/elasticsearch-openapi.json
index 3b030d3111..78290aea66 100644
--- a/output/openapi/elasticsearch-openapi.json
+++ b/output/openapi/elasticsearch-openapi.json
@@ -67,6 +67,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/async_search._types:AsyncSearchDocumentResponseBase" + }, + "examples": { + "AsyncSearchGetResponseExample1": { + "description": "A successful response from `GET /_async_search/FmRldE8zREVEUzA2ZVpUeGs2ejJFUFEaMkZ5QTVrSTZSaVN3WlNFVmtlWHJsdzoxMDc=`.", + "value": "{\n \"id\" : \"FmRldE8zREVEUzA2ZVpUeGs2ejJFUFEaMkZ5QTVrSTZSaVN3WlNFVmtlWHJsdzoxMDc=\",\n \"is_partial\" : false, \n \"is_running\" : false, \n \"start_time_in_millis\" : 1583945890986,\n \"expiration_time_in_millis\" : 1584377890986, \n \"completion_time_in_millis\" : 1583945903130, \n \"response\" : {\n \"took\" : 12144,\n \"timed_out\" : false,\n \"num_reduce_phases\" : 46, \n \"_shards\" : {\n \"total\" : 562,\n \"successful\" : 188, \n \"skipped\" : 0,\n \"failed\" : 0\n },\n \"hits\" : {\n \"total\" : {\n \"value\" : 456433,\n \"relation\" : \"eq\"\n },\n \"max_score\" : null,\n \"hits\" : [ ]\n },\n \"aggregations\" : { \n \"sale_date\" : {\n \"buckets\" : []\n }\n }\n }\n}" + } } } } @@ -147,6 +153,23 @@ "application/json": { "schema": { "$ref": "#/components/schemas/async_search.status:StatusResponseBase" + }, + "examples": { + "AsyncSearchStatusResponseExample1": { + "summary": "An active async search", + "description": "A successful response from `GET /_async_search/status/FmRldE8zREVEUzA2ZVpUeGs2ejJFUFEaMkZ5QTVrSTZSaVN3WlNFVmtlWHJsdzoxMDc=`, which retrieves the status of a previously submitted async search without the results.", + "value": "{\n \"id\" : \"FmRldE8zREVEUzA2ZVpUeGs2ejJFUFEaMkZ5QTVrSTZSaVN3WlNFVmtlWHJsdzoxMDc=\",\n \"is_running\" : true,\n \"is_partial\" : true,\n \"start_time_in_millis\" : 1583945890986,\n \"expiration_time_in_millis\" : 1584377890986,\n \"_shards\" : {\n \"total\" : 562,\n \"successful\" : 188, \n \"skipped\" : 0,\n \"failed\" : 0\n }\n}" + }, + "AsyncSearchStatusResponseExample2": { + "summary": "A completed async search", + "description": "A successful response from `GET 
/_async_search/status/FmRldE8zREVEUzA2ZVpUeGs2ejJFUFEaMkZ5QTVrSTZSaVN3WlNFVmtlWHJsdzoxMDc=` for an async search that has completed. The status response has an additional `completion_status` field that shows the status code of the completed async search.\n", + "value": "{\n \"id\" : \"FmRldE8zREVEUzA2ZVpUeGs2ejJFUFEaMkZ5QTVrSTZSaVN3WlNFVmtlWHJsdzoxMDc=\",\n \"is_running\" : false,\n \"is_partial\" : false,\n \"start_time_in_millis\" : 1583945890986,\n \"expiration_time_in_millis\" : 1584377890986,\n \"_shards\" : {\n \"total\" : 562,\n \"successful\" : 562,\n \"skipped\" : 0,\n \"failed\" : 0\n },\n\"completion_status\" : 200 \n}" + }, + "AsyncSearchStatusResponseExample3": { + "summary": "A failed async search", + "description": "A response from `GET /_async_search/status/FmRldE8zREVEUzA2ZVpUeGs2ejJFUFEaMkZ5QTVrSTZSaVN3WlNFVmtlWHJsdzoxMDc=` for an async search that has completed with an error. The status response has an additional `completion_status` field that shows the status code of the completed async search.\n", + "value": "{\n \"id\" : \"FmRldE8zREVEUzA2ZVpUeGs2ejJFUFEaMkZ5QTVrSTZSaVN3WlNFVmtlWHJsdzoxMDc=\",\n \"is_running\" : false,\n \"is_partial\" : true,\n \"start_time_in_millis\" : 1583945890986,\n \"expiration_time_in_millis\" : 1584377890986,\n \"_shards\" : {\n \"total\" : 562,\n \"successful\" : 450,\n \"skipped\" : 0,\n \"failed\" : 112\n },\n\"completion_status\" : 503 \n}" + } } } } @@ -554,6 +577,17 @@ "application/json": { "schema": { "$ref": "#/components/schemas/autoscaling._types:AutoscalingPolicy" + }, + "examples": { + "PutAutoscalingPolicyRequestExample1": { + "summary": "Creates or updates an autoscaling policy.", + "value": "{\n \"roles\": [],\n \"deciders\": {\n \"fixed\": {\n }\n }\n}" + }, + "PutAutoscalingPolicyRequestExample2": { + "summary": "Creates an autoscaling policy.", + "description": "The API method and path for this request: `PUT /_autoscaling/policy/my_autoscaling_policy`. 
It creates `my_autoscaling_policy` using the fixed autoscaling decider, applying to the set of nodes having (only) the `data_hot` role.", + "value": "{\n \"roles\" : [ \"data_hot\" ],\n \"deciders\": {\n \"fixed\": {\n }\n }\n}" + } } } }, @@ -623,6 +657,13 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "DeleteAutoscalingPolicyResponseExample1": { + "summary": "A successful response for deleting one or more autoscaling policies.", + "description": "This may be a response to either `DELETE /_autoscaling/policy/my_autoscaling_policy` or `DELETE /_autoscaling/policy/*`.\n", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -2552,6 +2593,12 @@ "required": [ "remote_cluster" ] + }, + "examples": { + "PutAutoFollowPatternRequestExample1": { + "description": "Run `PUT /_ccr/auto_follow/my_auto_follow_pattern` to create an auto-follow pattern.\n", + "value": "{\n \"remote_cluster\" : \"remote_cluster\",\n \"leader_index_patterns\" :\n [\n \"leader_index*\"\n ],\n \"follow_index_pattern\" : \"{{leader_index}}-follower\",\n \"settings\": {\n \"index.number_of_replicas\": 0\n },\n \"max_read_request_operation_count\" : 1024,\n \"max_outstanding_read_requests\" : 16,\n \"max_read_request_size\" : \"1024k\",\n \"max_write_request_operation_count\" : 32768,\n \"max_write_request_size\" : \"16k\",\n \"max_outstanding_write_requests\" : 8,\n \"max_write_buffer_count\" : 512,\n \"max_write_buffer_size\" : \"512k\",\n \"max_retry_delay\" : \"10s\",\n \"read_poll_timeout\" : \"30s\"\n}" + } } } }, @@ -2564,6 +2611,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "PutAutoFollowPatternResponseExample1": { + "description": "A successful response for creating an auto-follow pattern.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -2611,6 +2664,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "DeleteAutoFollowPatternResponseExample1": { + "description": "A successful response from `DELETE /_ccr/auto_follow/my_auto_follow_pattern`, which deletes an auto-follow pattern.", + "value": "{\n \"acknowledged\" : true\n}" + } } } } @@ -2720,6 +2779,12 @@ "leader_index", "remote_cluster" ] + }, + "examples": { + "CreateFollowIndexRequestExample1": { + "description": "Run `PUT /follower_index/_ccr/follow?wait_for_active_shards=1` to create a follower index named `follower_index`.", + "value": "{\n \"remote_cluster\" : \"remote_cluster\",\n \"leader_index\" : \"leader_index\",\n \"settings\": {\n \"index.number_of_replicas\": 0\n },\n \"max_read_request_operation_count\" : 1024,\n \"max_outstanding_read_requests\" : 16,\n \"max_read_request_size\" : \"1024k\",\n \"max_write_request_operation_count\" : 32768,\n \"max_write_request_size\" : \"16k\",\n \"max_outstanding_write_requests\" : 8,\n \"max_write_buffer_count\" : 512,\n \"max_write_buffer_size\" : \"512k\",\n \"max_retry_delay\" : \"10s\",\n \"read_poll_timeout\" : \"30s\"\n}" + } } } }, @@ -2748,6 +2813,12 @@ "follow_index_shards_acked", "index_following_started" ] + }, + "examples": { + "CreateFollowIndexResponseExample1": { + "description": "A successful response from `PUT /follower_index/_ccr/follow?wait_for_active_shards=1`.", + "value": "{\n \"follow_index_created\" : true,\n \"follow_index_shards_acked\" : true,\n \"index_following_started\" : true\n}" + } } } } @@ -2808,6 +2879,18 @@ "required": [ "follower_indices" ] + 
}, + "examples": { + "FollowInfoResponseExample1": { + "summary": "An active follower index", + "description": "A successful response from `GET /follower_index/_ccr/info` when the follower index is active.", + "value": "{\n \"follower_indices\": [\n {\n \"follower_index\": \"follower_index\",\n \"remote_cluster\": \"remote_cluster\",\n \"leader_index\": \"leader_index\",\n \"status\": \"active\",\n \"parameters\": {\n \"max_read_request_operation_count\": 5120,\n \"max_read_request_size\": \"32mb\",\n \"max_outstanding_read_requests\": 12,\n \"max_write_request_operation_count\": 5120,\n \"max_write_request_size\": \"9223372036854775807b\",\n \"max_outstanding_write_requests\": 9,\n \"max_write_buffer_count\": 2147483647,\n \"max_write_buffer_size\": \"512mb\",\n \"max_retry_delay\": \"500ms\",\n \"read_poll_timeout\": \"1m\"\n }\n }\n ]\n}" + }, + "FollowInfoResponseExample2": { + "summary": "A paused follower index", + "description": "A successful response from `GET /follower_index/_ccr/info` when the follower index is paused.", + "value": "{\n \"follower_indices\": [\n {\n \"follower_index\": \"follower_index\",\n \"remote_cluster\": \"remote_cluster\",\n \"leader_index\": \"leader_index\",\n \"status\": \"paused\"\n }\n ]\n}" + } } } } @@ -2869,6 +2952,12 @@ "required": [ "indices" ] + }, + "examples": { + "FollowIndexStatsResponseExample1": { + "description": "A successful response from `GET /follower_index/_ccr/stats`, which retrieves follower stats.", + "value": "{\n \"indices\" : [\n {\n \"index\" : \"follower_index\",\n \"total_global_checkpoint_lag\" : 256,\n \"shards\" : [\n {\n \"remote_cluster\" : \"remote_cluster\",\n \"leader_index\" : \"leader_index\",\n \"follower_index\" : \"follower_index\",\n \"shard_id\" : 0,\n \"leader_global_checkpoint\" : 1024,\n \"leader_max_seq_no\" : 1536,\n \"follower_global_checkpoint\" : 768,\n \"follower_max_seq_no\" : 896,\n \"last_requested_seq_no\" : 897,\n \"outstanding_read_requests\" : 8,\n \"outstanding_write_requests\" : 2,\n \"write_buffer_operation_count\" : 64,\n \"follower_mapping_version\" : 4,\n \"follower_settings_version\" : 2,\n \"follower_aliases_version\" : 8,\n \"total_read_time_millis\" : 32768,\n \"total_read_remote_exec_time_millis\" : 16384,\n \"successful_read_requests\" : 32,\n \"failed_read_requests\" : 0,\n \"operations_read\" : 896,\n \"bytes_read\" : 32768,\n \"total_write_time_millis\" : 16384,\n \"write_buffer_size_in_bytes\" : 1536,\n \"successful_write_requests\" : 16,\n \"failed_write_requests\" : 0,\n \"operations_written\" : 832,\n \"read_exceptions\" : [ ],\n \"time_since_last_read_millis\" : 8\n }\n ]\n }\n ]\n}" + } } } } @@ -2930,6 +3019,12 @@ "type": "string" } } + }, + "examples": { + "ForgetFollowerIndexRequestExample1": { + "description": "Run `POST /<leader_index>/_ccr/forget_follower`.", + "value": "{\n \"follower_cluster\" : \"<follower_cluster>\",\n \"follower_index\" : \"<follower_index>\",\n \"follower_index_uuid\" : \"<follower_index_uuid>\",\n \"leader_remote_cluster\" : \"<leader_remote_cluster>\"\n}" + } } } }, @@ -2950,6 +3045,12 @@ "required": [ "_shards" ] + }, + "examples": { + "ForgetFollowerIndexResponseExample1": { + "description": "A successful response for removing the follower retention leases from the leader index.\n", + "value": "{\n \"_shards\" : {\n \"total\" : 1,\n \"successful\" : 1,\n \"failed\" : 0,\n \"failures\" : [ ]\n }\n}" + } } } } @@ -3023,6 +3124,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "PauseAutoFollowPatternResponseExample1": { + "description": "A successful response 
from `POST /_ccr/auto_follow/my_auto_follow_pattern/pause`, which pauses an auto-follow pattern.", + "value": "{\n \"acknowledged\" : true\n}" + } } } } @@ -3069,6 +3176,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "PauseFollowIndexResponseExample1": { + "description": "A successful response from `POST /follower_index/_ccr/pause_follow`, which pauses a follower index.", + "value": "{\n \"acknowledged\" : true\n}" + } } } } @@ -3118,6 +3231,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "ResumeAutoFollowPatternResponseExample1": { + "description": "A successful response from `POST /_ccr/auto_follow/my_auto_follow_pattern/resume`, which resumes an auto-follow pattern.\n", + "value": "{\n \"acknowledged\" : true\n}" + } } } } @@ -3197,6 +3316,12 @@ "$ref": "#/components/schemas/_types:Duration" } } + }, + "examples": { + "ResumeFollowIndexRequestExample1": { + "description": "Run `POST /follower_index/_ccr/resume_follow` to resume the follower index.", + "value": "{\n \"max_read_request_operation_count\" : 1024,\n \"max_outstanding_read_requests\" : 16,\n \"max_read_request_size\" : \"1024k\",\n \"max_write_request_operation_count\" : 32768,\n \"max_write_request_size\" : \"16k\",\n \"max_outstanding_write_requests\" : 8,\n \"max_write_buffer_count\" : 512,\n \"max_write_buffer_size\" : \"512k\",\n \"max_retry_delay\" : \"10s\",\n \"read_poll_timeout\" : \"30s\"\n}" + } } } } @@ -3208,6 +3333,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "ResumeFollowIndexResponseExample1": { + "description": "A successful response from resuming a follower index.", + "value": "{\n \"acknowledged\" : true\n}" + } } } } @@ -3265,6 +3396,12 @@ "auto_follow_stats", "follow_stats" ] + }, + "examples": { + "CcrStatsResponseExample1": { + "description": "A successful response from `GET /_ccr/stats` that returns cross-cluster replication stats.", + "value": "{\n \"auto_follow_stats\" : {\n \"number_of_failed_follow_indices\" : 0,\n \"number_of_failed_remote_cluster_state_requests\" : 0,\n \"number_of_successful_follow_indices\" : 1,\n \"recent_auto_follow_errors\" : [],\n \"auto_followed_clusters\" : []\n },\n \"follow_stats\" : {\n \"indices\" : [\n {\n \"index\" : \"follower_index\",\n \"total_global_checkpoint_lag\" : 256,\n \"shards\" : [\n {\n \"remote_cluster\" : \"remote_cluster\",\n \"leader_index\" : \"leader_index\",\n \"follower_index\" : \"follower_index\",\n \"shard_id\" : 0,\n \"leader_global_checkpoint\" : 1024,\n \"leader_max_seq_no\" : 1536,\n \"follower_global_checkpoint\" : 768,\n \"follower_max_seq_no\" : 896,\n \"last_requested_seq_no\" : 897,\n \"outstanding_read_requests\" : 8,\n \"outstanding_write_requests\" : 2,\n \"write_buffer_operation_count\" : 64,\n \"follower_mapping_version\" : 4,\n \"follower_settings_version\" : 2,\n \"follower_aliases_version\" : 8,\n \"total_read_time_millis\" : 32768,\n \"total_read_remote_exec_time_millis\" : 16384,\n \"successful_read_requests\" : 32,\n \"failed_read_requests\" : 0,\n \"operations_read\" : 896,\n \"bytes_read\" : 32768,\n \"total_write_time_millis\" : 16384,\n \"write_buffer_size_in_bytes\" : 1536,\n \"successful_write_requests\" : 16,\n \"failed_write_requests\" : 0,\n \"operations_written\" : 832,\n \"read_exceptions\" : [ ],\n \"time_since_last_read_millis\" : 8\n }\n ]\n }\n ]\n }\n}" + } } } } @@ -3314,6 
+3451,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "UnfollowIndexResponseExample1": { + "description": "A successful response from `POST /follower_index/_ccr/unfollow`.", + "value": "{\n \"acknowledged\" : true\n}" + } } } } @@ -3516,6 +3659,12 @@ "required": [ "id" ] + }, + "examples": { + "ClosePointInTimeRequestExample1": { + "description": "Run `DELETE /_pit` to close a point-in-time.", + "value": "{\n \"id\": \"46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==\"\n}" + } } } } @@ -4081,6 +4230,18 @@ } } } + }, + "examples": { + "ClusterPutSettingsRequestExample1": { + "summary": "A simple setting", + "description": "An example of a persistent update.", + "value": "{\n \"persistent\" : {\n \"indices.recovery.max_bytes_per_sec\" : \"50mb\"\n }\n}" + }, + "ClusterPutSettingsRequestExample2": { + "summary": "A setting with multiple patterns", + "description": "PUT `/_cluster/settings` to update the `action.auto_create_index` setting. The setting accepts a comma-separated list of patterns that you want to allow or you can prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. In this example, the auto-creation of indices called `my-index-000001` or `index10` is allowed, the creation of indices that match the pattern `index1*` is blocked, and the creation of any other indices that match the `ind*` pattern is allowed. Patterns are matched in the order specified.\n", + "value": "{\n \"persistent\": {\n \"action.auto_create_index\": \"my-index-000001,index10,-index1*,+ind*\" \n }\n}" + } } } }, @@ -4454,6 +4615,12 @@ } } } + }, + "examples": { + "ClusterRerouteRequestExample1": { + "description": "Run `POST /_cluster/reroute?metric=none` to change the allocation of shards in a cluster.", + "value": "{\n \"commands\": [\n {\n \"move\": {\n \"index\": \"test\", \"shard\": 0,\n \"from_node\": \"node1\", \"to_node\": \"node2\"\n }\n },\n {\n \"allocate_replica\": {\n \"index\": \"test\", \"shard\": 1,\n \"node\": \"node3\"\n }\n }\n ]\n}" + } } } } @@ -4713,6 +4880,11 @@ "required": [ "result" ] + }, + "examples": { + "ConnectorCheckInResponseExample1": { + "value": "{\n \"result\": \"updated\"\n}" + } } } } @@ -4834,6 +5006,11 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "ConnectorDeleteResponseExample1": { + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -4906,6 +5083,11 @@ "type": "object" } } + }, + "examples": { + "ConnectorUpdateLastSyncRequestExample1": { + "value": "{\n \"last_access_control_sync_error\": \"Houston, we have a problem!\",\n \"last_access_control_sync_scheduled_at\": \"2023-11-09T15:13:08.231Z\",\n \"last_access_control_sync_status\": \"pending\",\n \"last_deleted_document_count\": 42,\n \"last_incremental_sync_scheduled_at\": \"2023-11-09T15:13:08.231Z\",\n \"last_indexed_document_count\": 42,\n \"last_sync_error\": \"Houston, we have a problem!\",\n \"last_sync_scheduled_at\": \"2024-11-09T15:13:08.231Z\",\n \"last_sync_status\": \"completed\",\n \"last_synced\": \"2024-11-09T15:13:08.231Z\"\n}" + } } } }, @@ -5322,6 +5504,11 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "SyncJobDeleteResponseExample1": { + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -5365,6 +5552,11 @@ 
"required": [ "error" ] + }, + "examples": { + "SyncJobErrorRequestExample1": { + "value": "{\n \"error\": \"some-error\"\n}" + } } } }, @@ -5510,6 +5702,11 @@ "required": [ "id" ] + }, + "examples": { + "SyncJobPostRequestExample1": { + "value": "{\n \"id\": \"connector-id\",\n \"job_type\": \"full\",\n \"trigger_method\": \"on_demand\"\n}" + } } } }, @@ -5691,6 +5888,11 @@ "type": "string" } } + }, + "examples": { + "ConnectorUpdateApiKeyIDRequestExample1": { + "value": "{\n \"api_key_id\": \"my-api-key-id\",\n \"api_key_secret_id\": \"my-connector-secret-id\"\n}" + } } } }, @@ -5711,6 +5913,11 @@ "required": [ "result" ] + }, + "examples": { + "ConnectorUpdateAPIKeyIDResponseExample1": { + "value": "{\n \"result\": \"updated\"\n}" + } } } } @@ -5756,6 +5963,14 @@ } } } + }, + "examples": { + "ConnectorUpdateConfigurationRequestExample1": { + "value": "{\n \"values\": {\n \"tenant_id\": \"my-tenant-id\",\n \"tenant_name\": \"my-sharepoint-site\",\n \"client_id\": \"foo\",\n \"secret_value\": \"bar\",\n \"site_collections\": \"*\"\n }\n}" + }, + "ConnectorUpdateConfigurationRequestExample2": { + "value": "{\n \"values\": {\n \"secret_value\": \"foo-bar\"\n }\n}" + } } } }, @@ -5776,6 +5991,11 @@ "required": [ "result" ] + }, + "examples": { + "ConnectorUpdateConfigurationResponseExample1": { + "value": "{\n \"result\": \"updated\"\n}" + } } } } @@ -5825,6 +6045,11 @@ "required": [ "error" ] + }, + "examples": { + "ConnectorUpdateErrorRequestExample1": { + "value": "{\n \"error\": \"Houston, we have a problem!\"\n}" + } } } }, @@ -5845,6 +6070,11 @@ "required": [ "result" ] + }, + "examples": { + "ConnectorUpdateErrorResponseExample1": { + "value": "{\n \"result\": \"updated\"\n}" + } } } } @@ -5887,6 +6117,14 @@ "required": [ "features" ] + }, + "examples": { + "ConnectorUpdateFeaturesRequestExample1": { + "value": "{\n \"features\": {\n \"document_level_security\": {\n \"enabled\": true\n },\n \"incremental_sync\": {\n \"enabled\": true\n },\n \"sync_rules\": {\n \"advanced\": {\n \"enabled\": false\n },\n \"basic\": {\n \"enabled\": true\n }\n }\n }\n}" + }, + "ConnectorUpdateFeaturesRequestExample2": { + "value": "{\n \"features\": {\n \"document_level_security\": {\n \"enabled\": true\n }\n }\n}" + } } } }, @@ -5907,6 +6145,11 @@ "required": [ "result" ] + }, + "examples": { + "ConnectorUpdateFeaturesResponseExample1": { + "value": "{\n \"result\": \"updated\"\n}" + } } } } @@ -5958,6 +6201,14 @@ "$ref": "#/components/schemas/connector._types:FilteringAdvancedSnippet" } } + }, + "examples": { + "ConnectorUpdateFilteringRequestExample1": { + "value": "{\n \"rules\": [\n {\n \"field\": \"file_extension\",\n \"id\": \"exclude-txt-files\",\n \"order\": 0,\n \"policy\": \"exclude\",\n \"rule\": \"equals\",\n \"value\": \"txt\"\n },\n {\n \"field\": \"_\",\n \"id\": \"DEFAULT\",\n \"order\": 1,\n \"policy\": \"include\",\n \"rule\": \"regex\",\n \"value\": \".*\"\n }\n ]\n}" + }, + "ConnectorUpdateFilteringRequestExample2": { + "value": "{\n \"advanced_snippet\": {\n \"value\": [{\n \"tables\": [\n \"users\",\n \"orders\"\n ],\n \"query\": \"SELECT users.id AS id, orders.order_id AS order_id FROM users JOIN orders ON users.id = orders.user_id\"\n }]\n }\n}" + } } } }, @@ -5978,6 +6229,11 @@ "required": [ "result" ] + }, + "examples": { + "ConnectorUpdateFilteringResponseExample1": { + "value": "{\n \"result\": \"updated\"\n}" + } } } } @@ -6089,6 +6345,11 @@ "required": [ "index_name" ] + }, + "examples": { + "ConnectorUpdateIndexNameRequestExample1": { + "value": "{\n \"index_name\": 
\"data-from-my-google-drive\"\n}" + } } } }, @@ -6109,6 +6370,11 @@ "required": [ "result" ] + }, + "examples": { + "ConnectorUpdateIndexNameResponseExample1": { + "value": "{\n \"result\": \"updated\"\n}" + } } } } @@ -6150,6 +6416,11 @@ "type": "string" } } + }, + "examples": { + "ConnectorUpdateNameRequestExample1": { + "value": "{\n \"name\": \"Custom connector\",\n \"description\": \"This is my customized connector\"\n}" + } } } }, @@ -6170,6 +6441,11 @@ "required": [ "result" ] + }, + "examples": { + "ConnectorUpdateNameResponseExample1": { + "value": "{\n \"result\": \"updated\"\n}" + } } } } @@ -6273,6 +6549,11 @@ "required": [ "pipeline" ] + }, + "examples": { + "ConnectorUpdatePipelineRequestExample1": { + "value": "{\n \"pipeline\": {\n \"extract_binary_content\": true,\n \"name\": \"my-connector-pipeline\",\n \"reduce_whitespace\": true,\n \"run_ml_inference\": true\n }\n}" + } } } }, @@ -6293,6 +6574,11 @@ "required": [ "result" ] + }, + "examples": { + "ConnectorUpdatePipelineResponseExample1": { + "value": "{\n \"result\": \"updated\"\n}" + } } } } @@ -6334,6 +6620,14 @@ "required": [ "scheduling" ] + }, + "examples": { + "ConnectorUpdateSchedulingRequestExample1": { + "value": "{\n \"scheduling\": {\n \"access_control\": {\n \"enabled\": true,\n \"interval\": \"0 10 0 * * ?\"\n },\n \"full\": {\n \"enabled\": true,\n \"interval\": \"0 20 0 * * ?\"\n },\n \"incremental\": {\n \"enabled\": false,\n \"interval\": \"0 30 0 * * ?\"\n }\n }\n}" + }, + "ConnectorUpdateSchedulingRequestExample2": { + "value": "{\n \"scheduling\": {\n \"full\": {\n \"enabled\": true,\n \"interval\": \"0 10 0 * * ?\"\n }\n }\n}" + } } } }, @@ -6354,6 +6648,11 @@ "required": [ "result" ] + }, + "examples": { + "ConnectorUpdateSchedulingResponseExample1": { + "value": "{\n \"result\": \"updated\"\n}" + } } } } @@ -6395,6 +6694,11 @@ "required": [ "service_type" ] + }, + "examples": { + "ConnectorUpdateServiceTypeRequestExample1": { + "value": "{\n \"service_type\": \"sharepoint_online\"\n}" + } } } }, @@ -6415,6 +6719,11 @@ "required": [ "result" ] + }, + "examples": { + "ConnectorUpdateServiceTypeResponseExample1": { + "value": "{\n \"result\": \"updated\"\n}" + } } } } @@ -6456,6 +6765,11 @@ "required": [ "status" ] + }, + "examples": { + "ConnectorUpdateStatusRequestExample1": { + "value": "{\n \"status\": \"needs_configuration\"\n}" + } } } }, @@ -6476,6 +6790,11 @@ "required": [ "result" ] + }, + "examples": { + "ConnectorUpdateStatusResponseExample1": { + "value": "{\n \"result\": \"updated\"\n}" + } } } } @@ -6899,6 +7218,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "ImportDanglingIndexResponseExample1": { + "description": "A successful response from `POST /_dangling/zmM4e0JtBkeUjiHD-MihPQ?accept_data_loss=true`.\n", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -6998,6 +7323,11 @@ "required": [ "dangling_indices" ] + }, + "examples": { + "ListDanglingIndicesResponseExample1": { + "value": "{\n \"dangling_indices\": [\n {\n \"index_name\": \"my-index-000001\",\n \"index_uuid\": \"zmM4e0JtBkeUjiHD-MihPQ\",\n \"creation_date_millis\": 1589414451372,\n \"node_ids\": [\n \"pL47UN3dAb2d5RCWP6lQ3e\"\n ]\n }\n ]\n}" + } } } } @@ -7155,6 +7485,23 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_global.get:GetResult" + }, + "examples": { + "GetResponseExample1": { + "summary": "Get a document", + "description": "A successful response from `GET my-index-000001/_doc/0`. 
It retrieves the JSON document with the `_id` 0 from the `my-index-000001` index.\n", + "value": "{\n \"_index\": \"my-index-000001\",\n \"_id\": \"0\",\n \"_version\": 1,\n \"_seq_no\": 0,\n \"_primary_term\": 1,\n \"found\": true,\n \"_source\": {\n \"@timestamp\": \"2099-11-15T14:12:12\",\n \"http\": {\n \"request\": {\n \"method\": \"get\"\n },\n \"response\": {\n \"status_code\": 200,\n \"bytes\": 1070000\n },\n \"version\": \"1.1\"\n },\n \"source\": {\n \"ip\": \"127.0.0.1\"\n },\n \"message\": \"GET /search HTTP/1.1 200 1070000\",\n \"user\": {\n \"id\": \"kimchy\"\n }\n }\n}" + }, + "GetResponseExample2": { + "summary": "Get stored fields", + "description": "A successful response from `GET my-index-000001/_doc/1?stored_fields=tags,counter`, which retrieves a set of stored fields. Field values fetched from the document itself are always returned as an array. Any requested fields that are not stored (such as the counter field in this example) are ignored.\n", + "value": "{\n \"_index\": \"my-index-000001\",\n \"_id\": \"1\",\n \"_version\": 1,\n \"_seq_no\" : 22,\n \"_primary_term\" : 1,\n \"found\": true,\n \"fields\": {\n \"tags\": [\n \"production\"\n ]\n }\n}" + }, + "GetResponseExample3": { + "summary": "Get metadata fields", + "description": "A successful response from `GET my-index-000001/_doc/2?routing=user1&stored_fields=tags,counter`, which retrieves the `_routing` metadata field.\n", + "value": "{\n \"_index\": \"my-index-000001\",\n \"_id\": \"2\",\n \"_version\": 1,\n \"_seq_no\" : 13,\n \"_primary_term\" : 1,\n \"_routing\": \"user1\",\n \"found\": true,\n \"fields\": {\n \"tags\": [\n \"env2\"\n ]\n }\n}" + } } } } @@ -7405,6 +7752,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:WriteResponseBase" + }, + "examples": { + "DeleteResponseExample1": { + "description": "A successful response from `DELETE /my-index-000001/_doc/1`, which deletes the JSON document 1 from the `my-index-000001` index.", + "value": "{\n \"_shards\": {\n \"total\": 2,\n \"failed\": 0,\n \"successful\": 2\n },\n \"_index\": \"my-index-000001\",\n \"_id\": \"1\",\n \"_version\": 2,\n \"_primary_term\": 1,\n \"_seq_no\": 5,\n \"result\": \"deleted\"\n}" + } } } } @@ -7886,6 +8239,28 @@ "$ref": "#/components/schemas/_types:SlicedScroll" } } + }, + "examples": { + "DeleteByQueryRequestExample1": { + "summary": "Delete all documents", + "description": "Run `POST /my-index-000001,my-index-000002/_delete_by_query` to delete all documents from multiple data streams or indices.", + "value": "{\n \"query\": {\n \"match_all\": {}\n }\n}" + }, + "DeleteByQueryRequestExample2": { + "summary": "Delete a single document", + "description": "Run `POST my-index-000001/_delete_by_query` to delete a document by using a unique attribute.", + "value": "{\n \"query\": {\n \"term\": {\n \"user.id\": \"kimchy\"\n }\n },\n \"max_docs\": 1\n}" + }, + "DeleteByQueryRequestExample3": { + "summary": "Slice manually", + "description": "Run `POST my-index-000001/_delete_by_query` to slice a delete by query manually. Provide a slice ID and total number of slices.\n", + "value": "{\n \"slice\": {\n \"id\": 0,\n \"max\": 2\n },\n \"query\": {\n \"range\": {\n \"http.response.bytes\": {\n \"lt\": 2000000\n }\n }\n }\n}" + }, + "DeleteByQueryRequestExample4": { + "summary": "Automatic slicing", + "description": "Run `POST my-index-000001/_delete_by_query?refresh&slices=5` to let delete by query automatically parallelize using sliced scroll to slice on `_id`. 
The `slices` query parameter value specifies the number of slices to use.\n", + "value": "{\n \"query\": {\n \"range\": {\n \"http.response.bytes\": {\n \"lt\": 2000000\n }\n }\n }\n}" + } } } }, @@ -7959,6 +8334,12 @@ "type": "number" } } + }, + "examples": { + "DeleteByQueryResponseExample1": { + "description": "A successful response from `POST /my-index-000001/_delete_by_query`.", + "value": "{\n \"took\" : 147,\n \"timed_out\": false,\n \"total\": 119,\n \"deleted\": 119,\n \"batches\": 1,\n \"version_conflicts\": 0,\n \"noops\": 0,\n \"retries\": {\n \"bulk\": 0,\n \"search\": 0\n },\n \"throttled_millis\": 0,\n \"requests_per_second\": -1.0,\n \"throttled_until_millis\": 0,\n \"failures\" : [ ]\n}" + } } } } @@ -8618,6 +8999,12 @@ "is_partial", "is_running" ] + }, + "examples": { + "EqlGetStatusResponseExample1": { + "description": "A successful response for getting status information for an async EQL search.", + "value": "{\n \"id\": \"FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=\",\n \"is_running\" : true,\n \"is_partial\" : true,\n \"start_time_in_millis\" : 1611690235000,\n \"expiration_time_in_millis\" : 1611690295000\n}" + } } } } @@ -8844,6 +9231,11 @@ "required": [ "query" ] + }, + "examples": { + "AsyncQueryRequestExample1": { + "value": "{\n \"query\": \"\"\"\n FROM library\n | EVAL year = DATE_TRUNC(1 YEARS, release_date)\n | STATS MAX(page_count) BY year\n | SORT year\n | LIMIT 5\n \"\"\",\n \"wait_for_completion_timeout\": \"2s\"\n}" + } } } }, @@ -9107,6 +9499,12 @@ "required": [ "query" ] + }, + "examples": { + "QueryRequestExample1": { + "description": "Run `POST /_query` to get results for an ES|QL query.", + "value": "{\n \"query\": \"\"\"\n FROM library\n | EVAL year = DATE_TRUNC(1 YEARS, release_date)\n | STATS MAX(page_count) BY year\n | SORT year\n | LIMIT 5\n \"\"\"\n}" + } } } }, @@ -9572,6 +9970,12 @@ "required": [ "features" ] + }, + "examples": { + "GetFeaturesResponseExample1": { + "description": "A successful response for retrieving a list of feature states that can be included when taking a snapshot.", + "value": "{\n \"features\": [\n {\n \"name\": \"tasks\",\n \"description\": \"Manages task results\"\n },\n {\n \"name\": \"kibana\",\n \"description\": \"Manages Kibana configuration and reports\"\n }\n ]\n}" + } } } } @@ -9618,6 +10022,12 @@ "required": [ "features" ] + }, + "examples": { + "ResetFeaturesResponseExample1": { + "description": "A successful response for clearing state information stored in system indices by Elasticsearch features.", + "value": "{\n \"features\" : [\n {\n \"feature_name\" : \"security\",\n \"status\" : \"SUCCESS\"\n },\n {\n \"feature_name\" : \"tasks\",\n \"status\" : \"SUCCESS\"\n }\n ]\n}" + } } } } @@ -10725,6 +11135,12 @@ "$ref": "#/components/schemas/ilm._types:Policy" } } + }, + "examples": { + "PutLifecycleRequestExample1": { + "description": "Run `PUT _ilm/policy/my_policy` to create a new policy with arbitrary metadata.\n", + "value": "{\n \"policy\": {\n \"_meta\": {\n \"description\": \"used for nginx log\",\n \"project\": {\n \"name\": \"myProject\",\n \"department\": \"myDepartment\"\n }\n },\n \"phases\": {\n \"warm\": {\n \"min_age\": \"10d\",\n \"actions\": {\n \"forcemerge\": {\n \"max_num_segments\": 1\n }\n }\n },\n \"delete\": {\n \"min_age\": \"30d\",\n \"actions\": {\n \"delete\": {}\n }\n }\n }\n }\n}" + } } } } @@ -10736,6 +11152,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + 
"PutLifecycleResponseExample1": { + "description": "A successful response when creating a new lifecycle policy.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -10790,6 +11212,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "DeleteLifecycleResponseExample1": { + "description": "A successful response when deleting a lifecycle policy.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -10867,6 +11295,12 @@ "required": [ "indices" ] + }, + "examples": { + "ExplainLifecycleResponseExample1": { + "description": "A successful response when retrieving the current ILM status for an index.", + "value": "{\n \"indices\": {\n \"my-index-000001\": {\n \"index\": \"my-index-000001\",\n \"index_creation_date_millis\": 1538475653281,\n \"index_creation_date\": \"2018-10-15T13:45:21.981Z\",\n \"time_since_index_creation\": \"15s\",\n \"managed\": true,\n \"policy\": \"my_policy\",\n \"lifecycle_date_millis\": 1538475653281,\n \"lifecycle_date\": \"2018-10-15T13:45:21.981Z\",\n \"age\": \"15s\",\n \"phase\": \"new\",\n \"phase_time_millis\": 1538475653317,\n \"phase_time\": \"2018-10-15T13:45:22.577Z\",\n \"action\": \"complete\"\n \"action_time_millis\": 1538475653317,\n \"action_time\": \"2018-10-15T13:45:22.577Z\",\n \"step\": \"complete\",\n \"step_time_millis\": 1538475653317,\n \"step_time\": \"2018-10-15T13:45:22.577Z\"\n }\n }\n}" + } } } } @@ -10921,6 +11355,12 @@ "required": [ "operation_mode" ] + }, + "examples": { + "GetILMStatusResponseExample1": { + "description": "A successful response when retrieving the current ILM status.", + "value": "{\n \"operation_mode\": \"RUNNING\"\n}" + } } } } @@ -10965,6 +11405,12 @@ "type": "string" } } + }, + "examples": { + "RequestExample1": { + "description": "Run `POST /_ilm/migrate_to_data_tiers` to migrate the indices, ILM policies, legacy templates, composable, and component templates away from defining custom allocation filtering using the `custom_attribute_name` node attribute. 
It also deletes the legacy template with name `global-template` if it exists in the system.\n", + "value": "{\n \"legacy_template_to_delete\": \"global-template\",\n \"node_attribute\": \"custom_attribute_name\"\n}" + } } } } @@ -11025,6 +11471,12 @@ "migrated_composable_templates", "migrated_component_templates" ] + }, + "examples": { + "ResponseExample1": { + "description": "A successful response when migrating indices, ILMs, and templates from custom node attributes to data tiers.\n", + "value": "{\n \"dry_run\": false,\n \"removed_legacy_template\":\"global-template\",\n \"migrated_ilm_policies\":[\"policy_with_allocate_action\"],\n \"migrated_indices\":[\"warm-index-to-migrate-000001\"],\n \"migrated_legacy_templates\":[\"a-legacy-template\"],\n \"migrated_composable_templates\":[\"a-composable-template\"],\n \"migrated_component_templates\":[\"a-component-template\"]\n}" + } } } } @@ -11071,6 +11523,18 @@ "current_step", "next_step" ] + }, + "examples": { + "MoveToStepRequestExample1": { + "summary": "Move to forcemerge step", + "description": "Run `POST _ilm/move/my-index-000001` to move `my-index-000001` from the initial step to the `forcemerge` step.\n", + "value": "{\n \"current_step\": {\n \"phase\": \"new\",\n \"action\": \"complete\",\n \"name\": \"complete\"\n },\n \"next_step\": {\n \"phase\": \"warm\",\n \"action\": \"forcemerge\",\n \"name\": \"forcemerge\"\n }\n}" + }, + "MoveToStepRequestExample2": { + "summary": "Move to warm step", + "description": "Run `POST _ilm/move/my-index-000001` to move `my-index-000001` from the end of the hot phase into the start of the warm phase.\n", + "value": "{\n \"current_step\": {\n \"phase\": \"hot\",\n \"action\": \"complete\",\n \"name\": \"complete\"\n },\n \"next_step\": {\n \"phase\": \"warm\"\n }\n}" + } } } } @@ -11082,6 +11546,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "MoveToStepResponseExample1": { + "description": "A successful response when running a specific step in a lifecycle policy.\n", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -11133,6 +11603,12 @@ "failed_indexes", "has_failures" ] + }, + "examples": { + "RemovePolicyResponseExample1": { + "description": "A successful response when removing a lifecycle policy from an index.", + "value": "{\n \"has_failures\" : false,\n \"failed_indexes\" : []\n}" + } } } } @@ -11214,6 +11690,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "StartILMResponseExample1": { + "description": "A successful response when starting the ILM plugin.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -11259,6 +11741,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "StopILMResponseExample1": { + "description": "A successful response when stopping the ILM plugin.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -11437,6 +11925,12 @@ "shards_acknowledged", "indices" ] + }, + "examples": { + "IndicesAddBlockResponseExample1": { + "description": "A successful response from `PUT /my-index-000001/_block/write`, which adds an index block to an index.", + "value": "{\n \"acknowledged\" : true,\n \"shards_acknowledged\" : true,\n \"indices\" : [ {\n \"name\" : \"my-index-000001\",\n \"blocked\" : true\n } ]\n}" + } } } } @@ -11827,6 +12321,12 @@ "indices", "shards_acknowledged" ] + }, + "examples": { + "CloseIndexResponseExample1": { + "description": "A 
successful response for closing an index.", + "value": "{\n \"acknowledged\": true,\n \"shards_acknowledged\": true,\n \"indices\": {\n \"my-index-000001\": {\n \"closed\": true\n }\n }\n}" + } } } } @@ -12021,6 +12521,23 @@ "$ref": "#/components/schemas/indices._types:IndexSettings" } } + }, + "examples": { + "indicesCreateRequestExample1": { + "summary": "Create an index.", + "description": "This request specifies the `number_of_shards` and `number_of_replicas`.", + "value": "{\n \"settings\": {\n \"number_of_shards\": 3,\n \"number_of_replicas\": 2\n }\n}" + }, + "indicesCreateRequestExample2": { + "summary": "Create an index with mappings.", + "description": "You can provide mapping definitions in the create index API requests.", + "value": "{\n \"settings\": {\n \"number_of_shards\": 1\n },\n \"mappings\": {\n \"properties\": {\n \"field1\": { \"type\": \"text\" }\n }\n }\n}" + }, + "indicesCreateRequestExample3": { + "summary": "Create an index with aliases.", + "description": "You can provide mapping definitions in the create index API requests. Index alias names also support date math.\n", + "value": "{\n \"aliases\": {\n \"alias_1\": {},\n \"alias_2\": {\n \"filter\": {\n \"term\": {\n \"user.id\": \"kimchy\"\n }\n },\n \"routing\": \"shard-1\"\n }\n }\n}" + } } } } @@ -12778,6 +13295,12 @@ "required": [ "data_streams" ] + }, + "examples": { + "IndicesGetDataLifecycleResponseExample1": { + "description": "A successful response from `GET _lifecycle/stats?human&pretty`.", + "value": "{\n \"data_streams\": [\n {\n \"name\": \"my-data-stream-1\",\n \"lifecycle\": {\n \"enabled\": true,\n \"data_retention\": \"7d\"\n }\n },\n {\n \"name\": \"my-data-stream-2\",\n \"lifecycle\": {\n \"enabled\": true,\n \"data_retention\": \"7d\"\n }\n }\n ]\n}" + } } } } @@ -12840,6 +13363,17 @@ "application/json": { "schema": { "$ref": "#/components/schemas/indices._types:DataStreamLifecycle" + }, + "examples": { + "IndicesPutDataLifecycleRequestExample1": { + "summary": "Set the data stream lifecycle retention", + "value": "{\n \"data_retention\": \"7d\"\n}" + }, + "IndicesPutDataLifecycleRequestExample2": { + "summary": "Set the data stream lifecycle downsampling", + "description": "This example configures two downsampling rounds.", + "value": "{\n \"downsampling\": [\n {\n \"after\": \"1d\",\n \"fixed_interval\": \"10m\"\n },\n {\n \"after\": \"7d\",\n \"fixed_interval\": \"1d\"\n }\n ]\n}" + } } } }, @@ -13453,6 +13987,11 @@ "application/json": { "schema": { "$ref": "#/components/schemas/indices._types:DownsampleConfig" + }, + "examples": { + "RequestExample1": { + "value": "{\n \"fixed_interval\": \"1d\"\n}" + } } } }, @@ -13594,6 +14133,18 @@ "required": [ "indices" ] + }, + "examples": { + "IndicesExplainDataLifecycleResponseExample1": { + "summary": "Successful response", + "description": "A successful response from `GET .ds-metrics-2023.03.22-000001/_lifecycle/explain`, which retrieves the lifecycle status for a data stream backing index. 
If the index is managed by a data stream lifecycle, the API will show the `managed_by_lifecycle` field set to `true` and the rest of the response will contain information about the lifecycle execution status for this index.\n", + "value": "{\n \"indices\": {\n \".ds-metrics-2023.03.22-000001\": {\n \"index\" : \".ds-metrics-2023.03.22-000001\",\n \"managed_by_lifecycle\" : true,\n \"index_creation_date_millis\" : 1679475563571,\n \"time_since_index_creation\" : \"843ms\",\n \"rollover_date_millis\" : 1679475564293,\n \"time_since_rollover\" : \"121ms\",\n \"lifecycle\" : { },\n \"generation_time\" : \"121ms\"\n }\n }\n}" + }, + "IndicesExplainDataLifecycleResponseExample2": { + "summary": "Successful response with error messages", + "description": "The API reports any errors related to the lifecycle execution for the target index.", + "value": "{\n \"indices\": {\n \".ds-metrics-2023.03.22-000001\": {\n \"index\" : \".ds-metrics-2023.03.22-000001\",\n \"managed_by_lifecycle\" : true,\n \"index_creation_date_millis\" : 1679475563571,\n \"time_since_index_creation\" : \"843ms\",\n \"lifecycle\" : {\n \"enabled\": true\n },\n \"error\": \"{\\\"type\\\":\\\"validation_exception\\\",\\\"reason\\\":\\\"Validation Failed: 1: this action would add [2] shards, but this cluster\ncurrently has [4]/[3] maximum normal shards open;\\\"}\"\n }\n }\n}" + } } } } @@ -14004,6 +14555,12 @@ "data_stream_count", "data_streams" ] + }, + "examples": { + "IndicesGetDataLifecycleStatsResponseExample1": { + "description": "A successful response for `GET _lifecycle/stats?human&pretty`", + "value": "{\n \"last_run_duration_in_millis\": 2,\n \"last_run_duration\": \"2ms\",\n \"time_between_starts_in_millis\": 9998,\n \"time_between_starts\": \"9.99s\",\n \"data_streams_count\": 2,\n \"data_streams\": [\n {\n \"name\": \"my-data-stream\",\n \"backing_indices_in_total\": 2,\n \"backing_indices_in_error\": 0\n },\n {\n \"name\": \"my-other-stream\",\n \"backing_indices_in_total\": 2,\n \"backing_indices_in_error\": 1\n }\n ]\n}" + } } } } @@ -15318,6 +15875,12 @@ "aliases", "data_streams" ] + }, + "examples": { + "ResolveIndexResponseExample1": { + "description": "A successful response from `GET /_resolve/index/f*,remoteCluster1:bar*?expand_wildcards=all`.", + "value": "{\n \"indices\": [\n {\n \"name\": \"foo_closed\",\n \"attributes\": [\n \"closed\"\n ]\n },\n {\n \"name\": \"freeze-index\",\n \"aliases\": [\n \"f-alias\"\n ],\n \"attributes\": [\n \"open\"\n ]\n },\n {\n \"name\": \"remoteCluster1:bar-01\",\n \"attributes\": [\n \"open\"\n ]\n }\n ],\n \"aliases\": [\n {\n \"name\": \"f-alias\",\n \"indices\": [\n \"freeze-index\",\n \"my-index-000001\"\n ]\n }\n ],\n \"data_streams\": [\n {\n \"name\": \"foo\",\n \"backing_indices\": [\n \".ds-foo-2099.03.07-000001\"\n ],\n \"timestamp_field\": \"@timestamp\"\n }\n ]\n}" + } } } } @@ -16679,6 +17242,12 @@ "tagline", "version" ] + }, + "examples": { + "RootNodeInfoResponseExample1": { + "description": "A successful response from `GET /`.", + "value": "{\n \"name\": \"instance-0000000000\",\n \"cluster_name\": \"my_test_cluster\",\n \"cluster_uuid\": \"5QaxoN0pRZuOmWSxstBBwQ\",\n \"version\": {\n \"build_date\": \"2024-02-01T13:07:13.727175297Z\",\n \"minimum_wire_compatibility_version\": \"7.17.0\",\n \"build_hash\": \"6185ba65d27469afabc9bc951cded6c17c21e3f3\",\n \"number\": \"8.12.1\",\n \"lucene_version\": \"9.9.2\",\n \"minimum_index_compatibility_version\": \"7.0.0\",\n \"build_flavor\": \"default\",\n \"build_snapshot\": false,\n \"build_type\": \"docker\"\n },\n 
\"tagline\": \"You Know, for Search\"\n}" + } } } } @@ -17109,6 +17678,17 @@ "type": "boolean" } } + }, + "examples": { + "PutPipelineRequestExample1": { + "summary": "Create an ingest pipeline.", + "value": "{\n \"description\" : \"My optional pipeline description\",\n \"processors\" : [\n {\n \"set\" : {\n \"description\" : \"My optional processor description\",\n \"field\": \"my-keyword-field\",\n \"value\": \"foo\"\n }\n }\n ]\n}" + }, + "PutPipelineRequestExample2": { + "summary": "Create an ingest pipeline with metadata.", + "description": "You can use the `_meta` parameter to add arbitrary metadata to a pipeline.", + "value": "{\n \"description\" : \"My optional pipeline description\",\n \"processors\" : [\n {\n \"set\" : {\n \"description\" : \"My optional processor description\",\n \"field\": \"my-keyword-field\",\n \"value\": \"foo\"\n }\n }\n ],\n \"_meta\": {\n \"reason\": \"set my-keyword-field to foo\",\n \"serialization\": {\n \"class\": \"MyPipeline\",\n \"id\": 10\n }\n }\n}" + } } } }, @@ -17525,6 +18105,12 @@ "required": [ "license" ] + }, + "examples": { + "GetLicenseResponseExample1": { + "description": "A successful response from `GET /_license`.", + "value": "{\n \"license\" : {\n \"status\" : \"active\",\n \"uid\" : \"cbff45e7-c553-41f7-ae4f-9205eabd80xx\",\n \"type\" : \"trial\",\n \"issue_date\" : \"2018-10-20T22:05:12.332Z\",\n \"issue_date_in_millis\" : 1540073112332,\n \"expiry_date\" : \"2018-11-19T22:05:12.332Z\",\n \"expiry_date_in_millis\" : 1542665112332,\n \"max_nodes\" : 1000,\n \"max_resource_units\" : null,\n \"issued_to\" : \"test\",\n \"issuer\" : \"elasticsearch\",\n \"start_date_in_millis\" : -1\n }\n}" + } } } } @@ -17903,6 +18489,13 @@ "application/json": { "schema": { "$ref": "#/components/schemas/logstash._types:Pipeline" + }, + "examples": { + "LogstashPutPipelineRequestExample1": { + "summary": "Create a pipeline", + "description": "Run `PUT _logstash/pipeline/my_pipeline` to create a pipeline.", + "value": "{\n \"description\": \"Sample pipeline for illustration purposes\",\n \"last_modified\": \"2021-01-02T02:50:51.250Z\",\n \"pipeline_metadata\": {\n \"type\": \"logstash_pipeline\",\n \"version\": 1\n },\n \"username\": \"elastic\",\n \"pipeline\": \"input {}\\\\n filter { grok {} }\\\\n output {}\",\n \"pipeline_settings\": {\n \"pipeline.workers\": 1,\n \"pipeline.batch.size\": 125,\n \"pipeline.batch.delay\": 50,\n \"queue.type\": \"memory\",\n \"queue.max_bytes\": \"1gb\",\n \"queue.checkpoint.writes\": 1024\n }\n}" + } } } }, @@ -18232,6 +18825,12 @@ "features", "migration_status" ] + }, + "examples": { + "GetFeatureUpgradeStatusResponseExample1": { + "description": "A successful response from `GET /_migration/system_features`.", + "value": "{\n \"features\" : [\n {\n \"feature_name\" : \"async_search\",\n \"minimum_index_version\" : \"8100099\",\n \"migration_status\" : \"NO_MIGRATION_NEEDED\",\n \"indices\" : [ ]\n },\n {\n \"feature_name\" : \"enrich\",\n \"minimum_index_version\" : \"8100099\",\n \"migration_status\" : \"NO_MIGRATION_NEEDED\",\n \"indices\" : [ ]\n },\n {\n \"feature_name\" : \"ent_search\",\n \"minimum_index_version\" : \"8100099\",\n \"migration_status\" : \"NO_MIGRATION_NEEDED\",\n \"indices\" : [ ]\n },\n {\n \"feature_name\" : \"fleet\",\n \"minimum_index_version\" : \"8100099\",\n \"migration_status\" : \"NO_MIGRATION_NEEDED\",\n \"indices\" : [ ]\n },\n {\n \"feature_name\" : \"geoip\",\n \"minimum_index_version\" : \"8100099\",\n \"migration_status\" : \"NO_MIGRATION_NEEDED\",\n \"indices\" : [ ]\n },\n 
{\n \"feature_name\" : \"kibana\",\n \"minimum_index_version\" : \"8100099\",\n \"migration_status\" : \"NO_MIGRATION_NEEDED\",\n \"indices\" : [ ]\n },\n {\n \"feature_name\" : \"logstash_management\",\n \"minimum_index_version\" : \"8100099\",\n \"migration_status\" : \"NO_MIGRATION_NEEDED\",\n \"indices\" : [ ]\n },\n {\n \"feature_name\" : \"machine_learning\",\n \"minimum_index_version\" : \"8100099\",\n \"migration_status\" : \"NO_MIGRATION_NEEDED\",\n \"indices\" : [ ]\n },\n {\n \"feature_name\" : \"searchable_snapshots\",\n \"minimum_index_version\" : \"8100099\",\n \"migration_status\" : \"NO_MIGRATION_NEEDED\",\n \"indices\" : [ ]\n },\n {\n \"feature_name\" : \"security\",\n \"minimum_index_version\" : \"8100099\",\n \"migration_status\" : \"NO_MIGRATION_NEEDED\",\n \"indices\" : [ ]\n },\n {\n \"feature_name\" : \"synonyms\",\n \"minimum_index_version\" : \"8100099\",\n \"migration_status\" : \"NO_MIGRATION_NEEDED\",\n \"indices\" : [ ]\n },\n {\n \"feature_name\" : \"tasks\",\n \"minimum_index_version\" : \"8100099\",\n \"migration_status\" : \"NO_MIGRATION_NEEDED\",\n \"indices\" : [ ]\n },\n {\n \"feature_name\" : \"transform\",\n \"minimum_index_version\" : \"8100099\",\n \"migration_status\" : \"NO_MIGRATION_NEEDED\",\n \"indices\" : [ ]\n },\n {\n \"feature_name\" : \"watcher\",\n \"minimum_index_version\" : \"8100099\",\n \"migration_status\" : \"NO_MIGRATION_NEEDED\",\n \"indices\" : [ ]\n }\n ],\n \"migration_status\" : \"NO_MIGRATION_NEEDED\"\n}" + } } } } @@ -18268,6 +18867,12 @@ "accepted", "features" ] + }, + "examples": { + "PostFeatureUpgradeResponseExample1": { + "description": "When you run `POST /_migration/system_features` to start the migration process, the response lists the features that will be migrated.\n", + "value": "{\n \"accepted\" : true,\n \"features\" : [\n {\n \"feature_name\" : \"security\"\n }\n ]\n}" + } } } } @@ -18312,6 +18917,12 @@ "required": [ "cleared" ] + }, + "examples": { + "MlClearTrainedModelDeploymentCacheResponseExample1": { + "description": "A successful response when clearing the inference cache.", + "value": "{\n \"cleared\": true\n}" + } } } } @@ -18408,6 +19019,12 @@ "required": [ "closed" ] + }, + "examples": { + "MlCloseJobResponseExample1": { + "description": "A successful response when closing anomaly detection jobs.", + "value": "{\n \"closed\": true\n}" + } } } } @@ -18569,6 +19186,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "MlDeleteCalendarResponseExample1": { + "description": "A successful response when deleting a calendar.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -18615,6 +19238,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "MlDeleteCalendarEventResponseExample1": { + "description": "A successful response when deleting a calendar event.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -18737,6 +19366,12 @@ "calendar_id", "job_ids" ] + }, + "examples": { + "MlDeleteCalendarJobResponseExample1": { + "description": "A successful response when deleting an anomaly detection job from a calendar.", + "value": "{\n \"calendar_id\": \"planned-outages\",\n \"job_ids\": []\n}" + } } } } @@ -18963,6 +19598,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "MlDeleteDataFrameAnalyticsResponseExample1": { + "description": "A successful response when deleting a data frame 
analytics job.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -19236,6 +19877,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "MlDeleteDatafeedResponseExample1": { + "description": "A successful response when deleting a datafeed.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -19430,6 +20077,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "MlDeleteFilterResponseExample1": { + "description": "A successful response when deleting a filter.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -19747,6 +20400,12 @@ "analysis_config", "data_description" ] + }, + "examples": { + "MlPutJobRequestExample1": { + "description": "A request to create an anomaly detection job and datafeed.", + "value": "{\n \"analysis_config\": {\n \"bucket_span\": \"15m\",\n \"detectors\": [\n {\n \"detector_description\": \"Sum of bytes\",\n \"function\": \"sum\",\n \"field_name\": \"bytes\"\n }\n ]\n },\n \"data_description\": {\n \"time_field\": \"timestamp\",\n \"time_format\": \"epoch_ms\"\n },\n \"analysis_limits\": {\n \"model_memory_limit\": \"11MB\"\n },\n \"model_plot_config\": {\n \"enabled\": true,\n \"annotations_enabled\": true\n },\n \"results_index_name\": \"test-job1\",\n \"datafeed_config\": {\n \"indices\": [\n \"kibana_sample_data_logs\"\n ],\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"match_all\": {}\n }\n ]\n }\n },\n \"runtime_mappings\": {\n \"hour_of_day\": {\n \"type\": \"long\",\n \"script\": {\n \"source\": \"emit(doc['timestamp'].value.getHour());\"\n }\n }\n },\n \"datafeed_id\": \"datafeed-test-job1\"\n }\n}" + } } } }, @@ -19901,6 +20560,18 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "MlDeleteJobResponseExample1": { + "summary": "Delete job", + "description": "A successful response when deleting an anomaly detection job.", + "value": "{\n \"acknowledged\": true\n}" + }, + "MlDeleteJobResponseExample2": { + "summary": "Delete job asynchronously", + "description": "A successful response when deleting an anomaly detection job asynchronously. 
When the `wait_for_completion` query parameter is set to `false`, the response contains an identifier for the job deletion task.\n", + "value": "{\n \"task\": \"oTUltX4IQMOUUVeiohTt8A:39\"\n}" + } } } } @@ -20032,6 +20703,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "MlDeleteModelSnapshotResponseExample1": { + "description": "A successful response when deleting an existing model snapshot.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -20238,6 +20915,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "MlDeleteTrainedModelResponseExample1": { + "description": "A successful response when deleting an existing trained inference model.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -20340,6 +21023,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "MlDeleteTrainedModelAliasResponseExample1": { + "description": "A successful response when deleting a trained model alias.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -20380,6 +21069,12 @@ } } } + }, + "examples": { + "MlEstimateModelMemoryRequestExample1": { + "description": "Run `POST _ml/anomaly_detectors/_estimate_model_memory` to estimate the model memory limit based on the analysis configuration details provided in the request body.", + "value": "{\n \"analysis_config\": {\n \"bucket_span\": \"5m\",\n \"detectors\": [\n {\n \"function\": \"sum\",\n \"field_name\": \"bytes\",\n \"by_field_name\": \"status\",\n \"partition_field_name\": \"app\"\n }\n ],\n \"influencers\": [\n \"source_ip\",\n \"dest_ip\"\n ]\n },\n \"overall_cardinality\": {\n \"status\": 10,\n \"app\": 50\n },\n \"max_bucket_cardinality\": {\n \"source_ip\": 300,\n \"dest_ip\": 30\n }\n}" + } } } }, @@ -20400,6 +21095,12 @@ "required": [ "model_memory_estimate" ] + }, + "examples": { + "MlEstimateModelMemoryResponseExample1": { + "description": "A successful response from `POST _ml/anomaly_detectors/_estimate_model_memory`.", + "value": "{\n \"model_memory_estimate\": \"21mb\"\n}" + } } } } @@ -20436,6 +21137,33 @@ "evaluation", "index" ] + }, + "examples": { + "MlEvaluateDataFrameRequestExample1": { + "summary": "Classification example 1", + "description": "Run `POST _ml/data_frame/_evaluate` to evaluate a classification job for an annotated index. The `actual_field` contains the ground truth for classification. The `predicted_field` contains the predicted value calculated by the classification analysis.\n", + "value": "{\n \"index\": \"animal_classification\",\n \"evaluation\": {\n \"classification\": {\n \"actual_field\": \"animal_class\",\n \"predicted_field\": \"ml.animal_class_prediction\",\n \"metrics\": {\n \"multiclass_confusion_matrix\": {}\n }\n }\n }\n}" + }, + "MlEvaluateDataFrameRequestExample2": { + "summary": "Classification example 2", + "description": "Run `POST _ml/data_frame/_evaluate` to evaluate a classification job with AUC ROC metrics for an annotated index. The `actual_field` contains the ground truth value for the actual animal classification. This is required in order to evaluate results. 
The `class_name` specifies the class name that is treated as positive during the evaluation; all the other classes are treated as negative.\n", + "value": "{\n \"index\": \"animal_classification\",\n \"evaluation\": {\n \"classification\": {\n \"actual_field\": \"animal_class\",\n \"metrics\": {\n \"auc_roc\": {\n \"class_name\": \"dog\"\n }\n }\n }\n }\n}" + }, + "MlEvaluateDataFrameRequestExample3": { + "summary": "Outlier detection", + "description": "Run `POST _ml/data_frame/_evaluate` to evaluate an outlier detection job for an annotated index.\n", + "value": "{\n \"index\": \"my_analytics_dest_index\",\n \"evaluation\": {\n \"outlier_detection\": {\n \"actual_field\": \"is_outlier\",\n \"predicted_probability_field\": \"ml.outlier_score\"\n }\n }\n}" + }, + "MlEvaluateDataFrameRequestExample4": { + "summary": "Regression example 1", + "description": "Run `POST _ml/data_frame/_evaluate` to evaluate the testing error of a regression job for an annotated index. The term query in the body limits evaluation to be performed on the test split only. The `actual_field` contains the ground truth for house prices. The `predicted_field` contains the house price calculated by the regression analysis.\n", + "value": "{\n \"index\": \"house_price_predictions\",\n \"query\": {\n \"bool\": {\n \"filter\": [\n {\n \"term\": {\n \"ml.is_training\": false\n }\n }\n ]\n }\n },\n \"evaluation\": {\n \"regression\": {\n \"actual_field\": \"price\",\n \"predicted_field\": \"ml.price_prediction\",\n \"metrics\": {\n \"r_squared\": {},\n \"mse\": {},\n \"msle\": {\n \"offset\": 10\n },\n \"huber\": {\n \"delta\": 1.5\n }\n }\n }\n }\n}" + }, + "MlEvaluateDataFrameRequestExample5": { + "summary": "Regression example 2", + "description": "Run `POST _ml/data_frame/_evaluate` to evaluate the training error of a regression job for an annotated index. The term query in the body limits evaluation to be performed on the training split only. The `actual_field` contains the ground truth for house prices. The `predicted_field` contains the house price calculated by the regression analysis.\n", + "value": "{\n \"index\": \"house_price_predictions\",\n \"query\": {\n \"term\": {\n \"ml.is_training\": {\n \"value\": true\n }\n }\n },\n \"evaluation\": {\n \"regression\": {\n \"actual_field\": \"price\",\n \"predicted_field\": \"ml.price_prediction\",\n \"metrics\": {\n \"r_squared\": {},\n \"mse\": {},\n \"msle\": {},\n \"huber\": {}\n }\n }\n }\n}" + } } } }, @@ -20459,6 +21187,23 @@ "$ref": "#/components/schemas/ml.evaluate_data_frame:DataframeRegressionSummary" } } + }, + "examples": { + "MlEvaluateDataFrameResponseExample1": { + "summary": "Classification example 1", + "description": "A successful response from `POST _ml/data_frame/_evaluate` to evaluate a classification analysis job for an annotated index. The `actual_class` contains the name of the class the analysis tried to predict. The `actual_class_doc_count` is the number of documents in the index belonging to the `actual_class`. 
The `predicted_classes` object contains the list of the predicted classes and the number of predictions associated with the class.\n", + "value": "{\n \"classification\": {\n \"multiclass_confusion_matrix\": {\n \"confusion_matrix\": [\n {\n \"actual_class\": \"cat\",\n \"actual_class_doc_count\": 12,\n \"predicted_classes\": [\n {\n \"predicted_class\": \"cat\",\n \"count\": 12\n },\n {\n \"predicted_class\": \"dog\",\n \"count\": 0\n }\n ],\n \"other_predicted_class_doc_count\": 0\n },\n {\n \"actual_class\": \"dog\",\n \"actual_class_doc_count\": 11,\n \"predicted_classes\": [\n {\n \"predicted_class\": \"dog\",\n \"count\": 7\n },\n {\n \"predicted_class\": \"cat\",\n \"count\": 4\n }\n ],\n \"other_predicted_class_doc_count\": 0\n }\n ],\n \"other_actual_class_count\": 0\n }\n }\n}" + }, + "MlEvaluateDataFrameResponseExample2": { + "summary": "Classification example 2", + "description": "A successful response from `POST _ml/data_frame/_evaluate` to evaluate a classification analysis job with the AUC ROC metrics for an annotated index.\n", + "value": "{\n \"classification\": {\n \"auc_roc\": {\n \"value\": 0.8941788639536681\n }\n }\n}" + }, + "MlEvaluateDataFrameResponseExample3": { + "summary": "Outlier detection", + "description": "A successful response from `POST _ml/data_frame/_evaluate` to evaluate an outlier detection job.", + "value": "{\n \"outlier_detection\": {\n \"auc_roc\": {\n \"value\": 0.9258475774641445\n },\n \"confusion_matrix\": {\n \"0.25\": {\n \"tp\": 5,\n \"fp\": 9,\n \"tn\": 204,\n \"fn\": 5\n },\n \"0.5\": {\n \"tp\": 1,\n \"fp\": 5,\n \"tn\": 208,\n \"fn\": 9\n },\n \"0.75\": {\n \"tp\": 0,\n \"fp\": 4,\n \"tn\": 209,\n \"fn\": 10\n }\n },\n \"precision\": {\n \"0.25\": 0.35714285714285715,\n \"0.5\": 0.16666666666666666,\n \"0.75\": 0\n },\n \"recall\": {\n \"0.25\": 0.5,\n \"0.5\": 0.1,\n \"0.75\": 0\n }\n }\n}" + } } } } @@ -22190,6 +22935,12 @@ "$ref": "#/components/schemas/_types:Duration" } } + }, + "examples": { + "MlOpenJobRequestExample1": { + "description": "A request to open anomaly detection jobs. The timeout specifies to wait 35 minutes for the job to open.\n", + "value": "{\n \"timeout\": \"35m\"\n}" + } } } } @@ -25884,6 +26635,12 @@ "_shards", "id" ] + }, + "examples": { + "OpenPointInTimeResponseExample1": { + "description": "A successful response from `POST /my-index-000001/_pit?keep_alive=1m&allow_partial_search_results=true`. 
It includes a summary of the total number of shards, as well as the number of successful shards when creating the PIT.\n", + "value": "{\n \"id\": \"46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA=\",\n \"_shards\": {\n \"total\": 10,\n \"successful\": 10,\n \"skipped\": 0,\n \"failed\": 0\n }\n}" + } } } } @@ -26008,6 +26765,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/query_rules._types:QueryRule" + }, + "examples": { + "QueryRuleGetResponseExample1": { + "description": "A successful response from `GET _query_rules/my-ruleset/_rule/my-rule1`.", + "value": "{\n \"rule_id\": \"my-rule1\",\n \"type\": \"pinned\",\n \"criteria\": [\n {\n \"type\": \"contains\",\n \"metadata\": \"query_string\",\n \"values\": [\n \"pugs\",\n \"puggles\"\n ]\n }\n ],\n \"actions\": {\n \"ids\": [\n \"id1\",\n \"id2\"\n ]\n }\n}" + } } } } @@ -26081,6 +26844,12 @@ "criteria", "actions" ] + }, + "examples": { + "QueryRulePutRequestExample1": { + "description": "Run `PUT _query_rules/my-ruleset/_rule/my-rule1` to create or update a query rule. The rule pins the documents with IDs `id1` and `id2` when the `query_string` metadata contains `pugs` or `puggles`.\n", + "value": "{\n \"type\": \"pinned\",\n \"criteria\": [\n {\n \"type\": \"contains\",\n \"metadata\": \"query_string\",\n \"values\": [\n \"pugs\",\n \"puggles\"\n ]\n }\n ],\n \"actions\": {\n \"ids\": [\n \"id1\",\n \"id2\"\n ]\n }\n}" + } } } }, @@ -26182,6 +26951,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/query_rules._types:QueryRuleset" + }, + "examples": { + "QueryRulesetGetResponseExample1": { + "description": "A successful response from `GET _query_rules/my-ruleset/`.", + "value": "{\n \"ruleset_id\": \"my-ruleset\",\n \"rules\": [\n {\n \"rule_id\": \"my-rule1\",\n \"type\": \"pinned\",\n \"criteria\": [\n {\n \"type\": \"contains\",\n \"metadata\": \"query_string\",\n \"values\": [ \"pugs\", \"puggles\" ]\n }\n ],\n \"actions\": {\n \"ids\": [\n \"id1\",\n \"id2\"\n ]\n }\n },\n {\n \"rule_id\": \"my-rule2\",\n \"type\": \"pinned\",\n \"criteria\": [\n {\n \"type\": \"fuzzy\",\n \"metadata\": \"query_string\",\n \"values\": [ \"rescue dogs\" ]\n }\n ],\n \"actions\": {\n \"docs\": [\n {\n \"_index\": \"index1\",\n \"_id\": \"id3\"\n },\n {\n \"_index\": \"index2\",\n \"_id\": \"id4\"\n }\n ]\n }\n }\n ]\n}" + } } } } @@ -26235,6 +27010,12 @@ "required": [ "rules" ] + }, + "examples": { + "QueryRulesetPutRequestExample1": { + "description": "Run `PUT _query_rules/my-ruleset` to create a new query ruleset. Two rules are associated with `my-ruleset`. `my-rule1` will pin documents with IDs `id1` and `id2` when `user_query` contains `pugs` or `puggles` and `user_country` exactly matches `us`. 
`my-rule2` will pin documents from different specified indices with IDs `id3` and `id4` when the `user_query` fuzzily matches `rescue dogs`.\n", + "value": "{\n \"rules\": [\n {\n \"rule_id\": \"my-rule1\",\n \"type\": \"pinned\",\n \"criteria\": [\n {\n \"type\": \"contains\",\n \"metadata\": \"user_query\",\n \"values\": [ \"pugs\", \"puggles\" ]\n },\n {\n \"type\": \"exact\",\n \"metadata\": \"user_country\",\n \"values\": [ \"us\" ]\n }\n ],\n \"actions\": {\n \"ids\": [\n \"id1\",\n \"id2\"\n ]\n }\n },\n {\n \"rule_id\": \"my-rule2\",\n \"type\": \"pinned\",\n \"criteria\": [\n {\n \"type\": \"fuzzy\",\n \"metadata\": \"user_query\",\n \"values\": [ \"rescue dogs\" ]\n }\n ],\n \"actions\": {\n \"docs\": [\n {\n \"_index\": \"index1\",\n \"_id\": \"id3\"\n },\n {\n \"_index\": \"index2\",\n \"_id\": \"id4\"\n }\n ]\n }\n }\n ]\n}" + } } } }, @@ -26349,6 +27130,12 @@ "count", "results" ] + }, + "examples": { + "QueryRulesetListResponseExample1": { + "description": "A successful response from `GET _query_rules/?from=0&size=3`.", + "value": "{\n \"count\": 3,\n \"results\": [\n {\n \"ruleset_id\": \"ruleset-1\",\n \"rule_total_count\": 1,\n \"rule_criteria_types_counts\": {\n \"exact\": 1\n }\n },\n {\n \"ruleset_id\": \"ruleset-2\",\n \"rule_total_count\": 2,\n \"rule_criteria_types_counts\": {\n \"exact\": 1,\n \"fuzzy\": 1\n }\n },\n {\n \"ruleset_id\": \"ruleset-3\",\n \"rule_total_count\": 3,\n \"rule_criteria_types_counts\": {\n \"exact\": 1,\n \"fuzzy\": 2\n }\n }\n ]\n}" + } } } } @@ -26395,6 +27182,12 @@ "required": [ "match_criteria" ] + }, + "examples": { + "QueryRulesetTestRequestExample1": { + "description": "Run `POST _query_rules/my-ruleset/_test` to test a ruleset. Provide the match criteria that you want to test against.\n", + "value": "{\n \"match_criteria\": {\n \"query_string\": \"puggles\"\n }\n}" + } } } }, @@ -26422,6 +27215,12 @@ "total_matched_rules", "matched_rules" ] + }, + "examples": { + "QueryRulesetTestResponseExample1": { + "description": "A successful response from `POST _query_rules/my-ruleset/_test`.", + "value": "{\n \"total_matched_rules\": 1,\n \"matched_rules\": [\n {\n \"ruleset_id\": \"my-ruleset\",\n \"rule_id\": \"my-rule1\"\n }\n ]\n}" + } } } } @@ -26684,6 +27483,73 @@ "dest", "source" ] + }, + "examples": { + "ReindexRequestExample1": { + "summary": "Reindex multiple sources", + "description": "Run `POST _reindex` to reindex from multiple sources. The `index` attribute in source can be a list, which enables you to copy from lots of sources in one request. 
This example copies documents from the `my-index-000001` and `my-index-000002` indices.\n", + "value": "{\n \"source\": {\n \"index\": [\"my-index-000001\", \"my-index-000002\"]\n },\n \"dest\": {\n \"index\": \"my-new-index-000002\"\n }\n}" + }, + "ReindexRequestExample10": { + "summary": "Reindex with Painless", + "description": "You can use Painless to reindex daily indices to apply a new template to the existing documents. The script extracts the date from the index name and creates a new index with `-1` appended. For example, all data from `metricbeat-2016.05.31` will be reindexed into `metricbeat-2016.05.31-1`.\n", + "value": "{\n \"source\": {\n \"index\": \"metricbeat-*\"\n },\n \"dest\": {\n \"index\": \"metricbeat\"\n },\n \"script\": {\n \"lang\": \"painless\",\n \"source\": \"ctx._index = 'metricbeat-' + (ctx._index.substring('metricbeat-'.length(), ctx._index.length())) + '-1'\"\n }\n}" + }, + "ReindexRequestExample11": { + "summary": "Reindex a random subset", + "description": "Run `POST _reindex` to extract a random subset of the source for testing. You might need to adjust the `min_score` value depending on the relative amount of data extracted from source.\n", + "value": "{\n \"max_docs\": 10,\n \"source\": {\n \"index\": \"my-index-000001\",\n \"query\": {\n \"function_score\" : {\n \"random_score\" : {},\n \"min_score\" : 0.9\n }\n }\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample12": { + "summary": "Reindex modified documents", + "description": "Run `POST _reindex` to modify documents during reindexing. This example bumps the version of the source document.\n", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\"\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\",\n \"version_type\": \"external\"\n },\n \"script\": {\n \"source\": \"if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}\",\n \"lang\": \"painless\"\n }\n}" + }, + "ReindexRequestExample13": { + "summary": "Reindex from remote on Elastic Cloud", + "description": "When using Elastic Cloud, you can run `POST _reindex` and authenticate against a remote cluster with an API key.\n", + "value": "{\n \"source\": {\n \"remote\": {\n \"host\": \"http://otherhost:9200\",\n \"username\": \"user\",\n \"password\": \"pass\"\n },\n \"index\": \"my-index-000001\",\n \"query\": {\n \"match\": {\n \"test\": \"data\"\n }\n }\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample2": { + "summary": "Manual slicing", + "description": "Run `POST _reindex` to slice a reindex request manually. Provide a slice ID and total number of slices to each request.\n", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\",\n \"slice\": {\n \"id\": 0,\n \"max\": 2\n }\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample3": { + "summary": "Automatic slicing", + "description": "Run `POST _reindex?slices=5&refresh` to automatically parallelize using sliced scroll to slice on `_id`. The `slices` parameter specifies the number of slices to use.\n", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\"\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample4": { + "summary": "Routing", + "description": "By default if reindex sees a document with routing then the routing is preserved unless it's changed by the script. You can set `routing` on the `dest` request to change this behavior. 
In this example, run `POST _reindex` to copy all documents from the `source` with the company name `cat` into the `dest` with routing set to `cat`.\n", + "value": "{\n \"source\": {\n \"index\": \"source\",\n \"query\": {\n \"match\": {\n \"company\": \"cat\"\n }\n }\n },\n \"dest\": {\n \"index\": \"dest\",\n \"routing\": \"=cat\"\n }\n}" + }, + "ReindexRequestExample5": { + "summary": "Ingest pipelines", + "description": "Run `POST _reindex` and use the ingest pipelines feature.", + "value": "{\n \"source\": {\n \"index\": \"source\"\n },\n \"dest\": {\n \"index\": \"dest\",\n \"pipeline\": \"some_ingest_pipeline\"\n }\n}" + }, + "ReindexRequestExample6": { + "summary": "Reindex with a query", + "description": "Run `POST _reindex` and add a query to the `source` to limit the documents to reindex. For example, this request copies documents into `my-new-index-000001` only if they have a `user.id` of `kimchy`.\n", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\",\n \"query\": {\n \"term\": {\n \"user.id\": \"kimchy\"\n }\n }\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample7": { + "summary": "Reindex with max_docs", + "description": "You can limit the number of processed documents by setting `max_docs`. For example, run `POST _reindex` to copy a single document from `my-index-000001` to `my-new-index-000001`.\n", + "value": "{\n \"max_docs\": 1,\n \"source\": {\n \"index\": \"my-index-000001\"\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample8": { + "summary": "Reindex selected fields", + "description": "You can use source filtering to reindex a subset of the fields in the original documents. For example, run `POST _reindex` to reindex only the `user.id` and `_doc` fields of each document.\n", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\",\n \"_source\": [\"user.id\", \"_doc\"]\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample9": { + "summary": "Reindex new field names", + "description": "A reindex operation can build a copy of an index with renamed fields. If your index has documents with `text` and `flag` fields, you can change the latter field name to `tag` during the reindex.\n", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\"\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n },\n \"script\": {\n \"source\": \"ctx._source.tag = ctx._source.remove(\\\"flag\\\")\"\n }\n}" + } } } }, @@ -26988,6 +27854,12 @@ "page_size", "rollup_index" ] + }, + "examples": { + "CreateRollupJobRequestExample1": { + "description": "Run `PUT _rollup/job/sensor` to create a rollup job that targets the `sensor-*` index pattern. This configuration enables date histograms to be used on the `timestamp` field and terms aggregations to be used on the `node` field. This configuration defines metrics over two fields: `temperature` and `voltage`. For the `temperature` field, it collects the `min`, `max`, and `sum` of the temperature. 
For `voltage`, it collects the `average`.\n", + "value": "{\n \"index_pattern\": \"sensor-*\",\n \"rollup_index\": \"sensor_rollup\",\n \"cron\": \"*/30 * * * * ?\",\n \"page_size\": 1000,\n \"groups\": {\n \"date_histogram\": {\n \"field\": \"timestamp\",\n \"fixed_interval\": \"1h\",\n \"delay\": \"7d\"\n },\n \"terms\": {\n \"fields\": [ \"node\" ]\n }\n },\n \"metrics\": [\n {\n \"field\": \"temperature\",\n \"metrics\": [ \"min\", \"max\", \"sum\" ]\n },\n {\n \"field\": \"voltage\",\n \"metrics\": [ \"avg\" ]\n }\n ]\n}" + } } } }, @@ -27000,6 +27872,11 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "CreateRollupJobResponseExample1": { + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -27049,6 +27926,12 @@ "required": [ "acknowledged" ] + }, + "examples": { + "DeleteRollupJobResponseExample1": { + "description": "A successful response from `DELETE _rollup/job/sensor`.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -27145,6 +28028,12 @@ "additionalProperties": { "$ref": "#/components/schemas/rollup.get_rollup_index_caps:IndexCapabilities" } + }, + "examples": { + "GetRollupIndexCapabilitiesResponseExample1": { + "description": "A successful response from `GET /sensor_rollup/_rollup/data`. The response contains the rollup job ID, the index that holds the rolled data, and the index pattern that the job was targeting. It also shows a list of fields that contain data eligible for rollup searches. For example, you can use a `min`, `max`, or `sum` aggregation on the `temperature` field, but only a `date_histogram` on `timestamp`.\n", + "value": "{\n \"sensor_rollup\" : {\n \"rollup_jobs\" : [\n {\n \"job_id\" : \"sensor\",\n \"rollup_index\" : \"sensor_rollup\",\n \"index_pattern\" : \"sensor-*\",\n \"fields\" : {\n \"node\" : [\n {\n \"agg\" : \"terms\"\n }\n ],\n \"temperature\" : [\n {\n \"agg\" : \"min\"\n },\n {\n \"agg\" : \"max\"\n },\n {\n \"agg\" : \"sum\"\n }\n ],\n \"timestamp\" : [\n {\n \"agg\" : \"date_histogram\",\n \"time_zone\" : \"UTC\",\n \"fixed_interval\" : \"1h\",\n \"delay\": \"7d\"\n }\n ],\n \"voltage\" : [\n {\n \"agg\" : \"avg\"\n }\n ]\n }\n }\n ]\n }\n}" + } } } } @@ -27250,6 +28139,12 @@ "required": [ "started" ] + }, + "examples": { + "StartRollupJobResponseExample1": { + "description": "A successful response from `POST _rollup/job/sensor/_start`.", + "value": "{\n \"started\": true\n}" + } } } } @@ -28050,6 +28945,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/search_application._types:SearchApplicationParameters" + }, + "examples": { + "SearchApplicationPutRequestExample1": { + "description": "Run `PUT _application/search_application/my-app` to create or update a search application called `my-app`. When the dictionary parameter is specified, the search application search API will perform the following parameter validation: it accepts only the `query_string` and `default_field` parameters; it verifies that `query_string` and `default_field` are both strings; it accepts `default_field` only if it takes the values title or description. 
If the parameters are not valid, the search application search API will return an error.\n", + "value": "{\n \"indices\": [ \"index1\", \"index2\" ],\n \"template\": {\n \"script\": {\n \"source\": {\n \"query\": {\n \"query_string\": {\n \"query\": \"{{query_string}}\",\n \"default_field\": \"{{default_field}}\"\n }\n }\n },\n \"params\": {\n \"query_string\": \"*\",\n \"default_field\": \"*\"\n }\n },\n \"dictionary\": {\n \"properties\": {\n \"query_string\": {\n \"type\": \"string\"\n },\n \"default_field\": {\n \"type\": \"string\",\n \"enum\": [\n \"title\",\n \"description\"\n ]\n },\n \"additionalProperties\": false\n },\n \"required\": [\n \"query_string\"\n ]\n }\n }\n}" + } } } }, @@ -28333,6 +29234,12 @@ "application/json": { "schema": { "type": "object" + }, + "examples": { + "BehavioralAnalyticsEventPostRequestExample1": { + "description": "Run `POST _application/analytics/my_analytics_collection/event/search_click` to send a `search_click` event to an analytics collection called `my_analytics_collection`.", + "value": "{\n \"session\": {\n \"id\": \"1797ca95-91c9-4e2e-b1bd-9c38e6f386a9\"\n },\n \"user\": {\n \"id\": \"5f26f01a-bbee-4202-9298-81261067abbd\"\n },\n \"search\":{\n \"query\": \"search term\",\n \"results\": {\n \"items\": [\n {\n \"document\": {\n \"id\": \"123\",\n \"index\": \"products\"\n }\n }\n ],\n \"total_results\": 10\n },\n \"sort\": {\n \"name\": \"relevance\"\n },\n \"search_application\": \"website\"\n },\n \"document\":{\n \"id\": \"123\",\n \"index\": \"products\"\n }\n}" + } } } }, @@ -28398,6 +29305,12 @@ } } } + }, + "examples": { + "SearchApplicationsRenderQueryRequestExample1": { + "description": "Run `POST _application/search_application/my-app/_render_query` to generate a query for a search application called `my-app` that uses the search template.", + "value": "{\n \"params\": {\n \"query_string\": \"my first query\",\n \"text_fields\": [\n {\n \"name\": \"title\",\n \"boost\": 5\n },\n {\n \"name\": \"description\",\n \"boost\": 1\n }\n ]\n }\n}" + } } } } @@ -29197,6 +30110,12 @@ "required": [ "index" ] + }, + "examples": { + "SearchableSnapshotsMountSnapshotRequestExample1": { + "description": "Run `POST /_snapshot/my_repository/my_snapshot/_mount?wait_for_completion=true` to mount the index `my_docs` from an existing snapshot named `my_snapshot` stored in `my_repository` as a new index `docs`.\n", + "value": "{\n \"index\": \"my_docs\",\n \"renamed_index\": \"docs\",\n \"index_settings\": {\n \"index.number_of_replicas\": 0\n },\n \"ignore_index_settings\": [ \"index.refresh_interval\" ]\n}" + } } } }, @@ -29301,6 +30220,12 @@ "required": [ "grant_type" ] + }, + "examples": { + "RequestExample1": { + "description": "Run `POST /_security/profile/_activate` to activate a user profile.\n", + "value": "{\n \"grant_type\": \"password\",\n \"username\" : \"jacknich\",\n \"password\" : \"l0ng-r4nd0m-p@ssw0rd\"\n}" + } } } }, @@ -29313,6 +30238,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/security._types:UserProfileWithMetadata" + }, + "examples": { + "ResponseExample1": { + "description": "A successful response from `POST /_security/profile/_activate`.", + "value": "{\n \"uid\": \"u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0\",\n \"enabled\": true,\n \"last_synchronized\": 1642650651037,\n \"user\": {\n \"username\": \"jacknich\",\n \"roles\": [\n \"admin\", \"other_role1\"\n ],\n \"realm_name\": \"native\",\n \"full_name\": \"Jack Nicholson\",\n \"email\": \"jacknich@example.com\"\n },\n \"labels\": {},\n \"data\": 
{},\n \"_doc\": {\n \"_primary_term\": 88,\n \"_seq_no\": 66\n }\n}" + } } } } @@ -29399,6 +30330,12 @@ "enabled", "authentication_type" ] + }, + "examples": { + "SecurityAuthenticateResponseExample1": { + "description": "A successful response from `GET /_security/_authenticate`.", + "value": "{\n \"username\": \"rdeniro\",\n \"roles\": [\n \"admin\"\n ],\n \"full_name\": null,\n \"email\": null,\n \"metadata\": { },\n \"enabled\": true,\n \"authentication_realm\": {\n \"name\" : \"file\",\n \"type\" : \"file\"\n },\n \"lookup_realm\": {\n \"name\" : \"file\",\n \"type\" : \"file\"\n },\n \"authentication_type\": \"realm\"\n}" + } } } } @@ -29457,6 +30394,23 @@ "required": [ "roles" ] + }, + "examples": { + "SecurityBulkPutRoleRequestExample1": { + "summary": "Bulk role success", + "description": "Run `POST /_security/role` to add roles called `my_admin_role` and `my_user_role`.\n", + "value": "{\n \"roles\": {\n \"my_admin_role\": {\n \"cluster\": [\n \"all\"\n ],\n \"indices\": [\n {\n \"names\": [\n \"index1\",\n \"index2\"\n ],\n \"privileges\": [\n \"all\"\n ],\n \"field_security\": {\n \"grant\": [\n \"title\",\n \"body\"\n ]\n },\n \"query\": \"{\\\"match\\\": {\\\"title\\\": \\\"foo\\\"}}\"\n }\n ],\n \"applications\": [\n {\n \"application\": \"myapp\",\n \"privileges\": [\n \"admin\",\n \"read\"\n ],\n \"resources\": [\n \"*\"\n ]\n }\n ],\n \"run_as\": [\n \"other_user\"\n ],\n \"metadata\": {\n \"version\": 1\n }\n },\n \"my_user_role\": {\n \"cluster\": [\n \"all\"\n ],\n \"indices\": [\n {\n \"names\": [\n \"index1\"\n ],\n \"privileges\": [\n \"read\"\n ],\n \"field_security\": {\n \"grant\": [\n \"title\",\n \"body\"\n ]\n },\n \"query\": \"{\\\"match\\\": {\\\"title\\\": \\\"foo\\\"}}\"\n }\n ],\n \"applications\": [\n {\n \"application\": \"myapp\",\n \"privileges\": [\n \"admin\",\n \"read\"\n ],\n \"resources\": [\n \"*\"\n ]\n }\n ],\n \"run_as\": [\n \"other_user\"\n ],\n \"metadata\": {\n \"version\": 1\n }\n }\n }\n}" + }, + "SecurityBulkPutRoleRequestExample2": { + "summary": "Bulk role errors", + "description": "Because errors are handled individually for each role create or update, the API allows partial success. 
For example, `POST /_security/role` would throw an error for `my_admin_role` because the privilege `bad_cluster_privilege` doesn't exist, but would be successful for `my_user_role`.\n", + "value": "{\n \"roles\": {\n \"my_admin_role\": {\n \"cluster\": [\n \"bad_cluster_privilege\"\n ],\n \"indices\": [\n {\n \"names\": [\n \"index1\",\n \"index2\"\n ],\n \"privileges\": [\"all\"],\n \"field_security\": {\n \"grant\": [\n \"title\",\n \"body\"\n ]\n },\n \"query\": \"{\\\"match\\\": {\\\"title\\\": \\\"foo\\\"}}\"\n }\n ],\n \"applications\": [\n {\n \"application\": \"myapp\",\n \"privileges\": [\n \"admin\",\n \"read\"\n ],\n \"resources\": [\n \"*\"\n ]\n }\n ],\n \"run_as\": [\n \"other_user\"\n ],\n \"metadata\": {\n \"version\": 1\n }\n },\n \"my_user_role\": {\n \"cluster\": [\n \"all\"\n ],\n \"indices\": [\n {\n \"names\": [\n \"index1\"\n ],\n \"privileges\": [\n \"read\"\n ],\n \"field_security\": {\n \"grant\": [\n \"title\",\n \"body\"\n ]\n },\n \"query\": \"{\\\"match\\\": {\\\"title\\\": \\\"foo\\\"}}\"\n }\n ],\n \"applications\": [\n {\n \"application\": \"myapp\",\n \"privileges\": [\n \"admin\",\n \"read\"\n ],\n \"resources\": [\n \"*\"\n ]\n }\n ],\n \"run_as\": [\n \"other_user\"\n ],\n \"metadata\": {\n \"version\": 1\n }\n }\n }\n}" + }, + "SecurityBulkPutRoleRequestExample3": { + "summary": "Role example 3", + "description": "Run `POST /_security/role/only_remote_access_role` to configure a role with remote indices and remote cluster privileges for a remote cluster.", + "value": "{\n \"remote_indices\": [\n {\n \"clusters\": [\"my_remote\"], \n \"names\": [\"logs*\"], \n \"privileges\": [\"read\", \"read_cross_cluster\", \"view_index_metadata\"] \n }\n ],\n \"remote_cluster\": [\n {\n \"clusters\": [\"my_remote\"], \n \"privileges\": [\"monitor_stats\"] \n }\n ]\n}" + } } } }, @@ -29495,6 +30449,18 @@ "$ref": "#/components/schemas/security._types:BulkError" } } + }, + "examples": { + "SecurityBulkPutRoleResponseExample1": { + "summary": "A successful response", + "description": "A successful response from `POST /_security/role` returns a JSON structure that shows whether the role has been created, updated, or had no changes made.\n", + "value": "{\n \"created\": [ \n \"my_admin_role\", \n \"my_user_role\"\n ]\n}" + }, + "SecurityBulkPutRoleResponseExample2": { + "summary": "A partially successful response", + "description": "A partially successful response from `POST /_security/role`. Errors are handled individually for each role create or update, thus the API allows partial success. In this example, the creation of the `my_user_role` role succeeds and the `my_admin_role` role fails.\n", + "value": "{\n \"created\": [\n \"my_user_role\" \n ],\n \"errors\": { \n \"count\": 1, \n \"details\": {\n \"my_admin_role\": { \n \"type\": \"action_request_validation_exception\",\n \"reason\": \"Validation Failed: 1: unknown cluster privilege [bad_cluster_privilege]. 
a privilege must be either one of the predefined cluster privilege names [manage_own_api_key,manage_data_stream_global_retention,monitor_data_stream_global_retention,none,cancel_task,cross_cluster_replication,cross_cluster_search,delegate_pki,grant_api_key,manage_autoscaling,manage_index_templates,manage_logstash_pipelines,manage_oidc,manage_saml,manage_search_application,manage_search_query_rules,manage_search_synonyms,manage_service_account,manage_token,manage_user_profile,monitor_connector,monitor_enrich,monitor_inference,monitor_ml,monitor_rollup,monitor_snapshot,monitor_stats,monitor_text_structure,monitor_watcher,post_behavioral_analytics_event,read_ccr,read_connector_secrets,read_fleet_secrets,read_ilm,read_pipeline,read_security,read_slm,transport_client,write_connector_secrets,write_fleet_secrets,create_snapshot,manage_behavioral_analytics,manage_ccr,manage_connector,manage_enrich,manage_ilm,manage_inference,manage_ml,manage_rollup,manage_slm,manage_watcher,monitor_data_frame_transforms,monitor_transform,manage_api_key,manage_ingest_pipelines,manage_pipeline,manage_data_frame_transforms,manage_transform,manage_security,monitor,manage,all] or a pattern over one of the available cluster actions;\"\n }\n }\n }\n}" + } } } } @@ -29538,6 +30504,13 @@ "required": [ "names" ] + }, + "examples": { + "SecurityBulkDeleteRoleRequestExample1": { + "summary": "Bulk delete example 1", + "description": "Run `DELETE /_security/role` to delete the `my_admin_role` and `my_user_role` roles.\n", + "value": "{\n \"names\": [\"my_admin_role\", \"my_user_role\"]\n}" + } } } }, @@ -29569,6 +30542,23 @@ "$ref": "#/components/schemas/security._types:BulkError" } } + }, + "examples": { + "SecurityBulkDeleteRoleResponseExample1": { + "summary": "A successful response", + "description": "A successful response from `DELETE /_security/role`.", + "value": "{\n \"deleted\": [\n \"my_admin_role\",\n \"my_user_role\"\n ]\n}" + }, + "SecurityBulkDeleteRoleResponseExample2": { + "summary": "A response with not_found roles", + "description": "A partially successful response from `DELETE /_security/role`. If a role cannot be found, it appears in the `not_found` list in the response.\n", + "value": "{\n \"deleted\": [\n \"my_admin_role\"\n ],\n \"not_found\": [\n \"not_an_existing_role\"\n ]\n}" + }, + "SecurityBulkDeleteRoleResponseExample3": { + "summary": "A response with errors", + "description": "A partially successful response from `DELETE /_security/role`. 
If part of a request fails or is invalid, the response includes `errors`.\n", + "value": "{\n \"deleted\": [\n \"my_admin_role\"\n ],\n \"errors\": {\n \"count\": 1,\n \"details\": {\n \"superuser\": {\n \"type\": \"illegal_argument_exception\",\n \"reason\": \"role [superuser] is reserved and cannot be deleted\"\n }\n }\n }\n}" + } } } } @@ -29622,6 +30612,16 @@ "required": [ "ids" ] + }, + "examples": { + "SecurityBulkUpdateApiKeysRequestExample1": { + "description": "Assign new role descriptors and metadata and update the expiration time for two API keys.", + "value": "{\n \"ids\": [\n \"VuaCfGcBCdbkQm-e5aOx\",\n \"H3_AhoIBA9hmeQJdg7ij\"\n ],\n \"role_descriptors\": {\n \"role-a\": {\n \"indices\": [\n {\n \"names\": [\n \"*\"\n ],\n \"privileges\": [\n \"write\"\n ]\n }\n ]\n }\n },\n \"metadata\": {\n \"environment\": {\n \"level\": 2,\n \"trusted\": true,\n \"tags\": [\n \"production\"\n ]\n }\n },\n \"expiration\": \"30d\"\n}" + }, + "SecurityBulkUpdateApiKeysRequestExample2": { + "description": "Remove the previously assigned permissions for two API keys, making them inherit the owner user's full permissions.", + "value": "{\n \"ids\": [\n \"VuaCfGcBCdbkQm-e5aOx\",\n \"H3_AhoIBA9hmeQJdg7ij\"\n ],\n \"role_descriptors\": {}\n}" + } } } }, @@ -30178,6 +31178,18 @@ "required": [ "api_keys" ] + }, + "examples": { + "SecurityGetApiKeyResponseExample1": { + "summary": "Get a key by ID", + "description": "A successful response from `GET /_security/api_key?id=VuaCfGcBCdbkQm-e5aOx&with_limited_by=true`.", + "value": "{\n \"api_keys\": [ \n {\n \"id\": \"VuaCfGcBCdbkQm-e5aOx\", \n \"name\": \"my-api-key\", \n \"creation\": 1548550550158, \n \"expiration\": 1548551550158, \n \"invalidated\": false, \n \"username\": \"myuser\", \n \"realm\": \"native1\", \n \"realm_type\": \"native\",\n \"metadata\": { \n \"application\": \"myapp\"\n },\n \"role_descriptors\": { }, \n \"limited_by\": [ \n {\n \"role-power-user\": {\n \"cluster\": [\n \"monitor\"\n ],\n \"indices\": [\n {\n \"names\": [\n \"*\"\n ],\n \"privileges\": [\n \"read\"\n ],\n \"allow_restricted_indices\": false\n }\n ],\n \"applications\": [ ],\n \"run_as\": [ ],\n \"metadata\": { },\n \"transient_metadata\": {\n \"enabled\": true\n }\n }\n }\n ]\n }\n ]\n}" + }, + "SecurityGetApiKeyResponseExample2": { + "summary": "Get all keys for a user", + "description": "A successful response from `GET /_security/api_key?username=myuser&realm_name=native1`. 
The response contains all API keys for the user `myuser` in the `native1` realm.\n", + "value": "{\n \"api_keys\": [\n {\n \"id\": \"0GF5GXsBCXxz2eDxWwFN\",\n \"name\": \"hadoop_myuser_key\",\n \"creation\": 1548550550158,\n \"expiration\": 1548551550158,\n \"invalidated\": false,\n \"username\": \"myuser\",\n \"realm\": \"native1\",\n \"realm_type\": \"native\",\n \"metadata\": {\n \"application\": \"myapp\"\n },\n \"role_descriptors\": {\n \"role-a\": {\n \"cluster\": [\n \"monitor\"\n ],\n \"indices\": [\n {\n \"names\": [\n \"index-a\"\n ],\n \"privileges\": [\n \"read\"\n ],\n \"allow_restricted_indices\": false\n }\n ],\n \"applications\": [ ],\n \"run_as\": [ ],\n \"metadata\": { },\n \"transient_metadata\": {\n \"enabled\": true\n }\n }\n }\n },\n {\n \"id\": \"6wHJmcQpReKBa42EHV5SBw\",\n \"name\": \"api-key-name-2\",\n \"creation\": 1548550550158,\n \"invalidated\": false,\n \"username\": \"user-y\",\n \"realm\": \"realm-2\",\n \"metadata\": {},\n \"role_descriptors\": { }\n }\n ]\n}" + } } } } @@ -30273,6 +31285,38 @@ "$ref": "#/components/schemas/_types:Username" } } + }, + "examples": { + "SecurityInvalidateApiKeyRequestExample1": { + "summary": "API keys by ID", + "description": "Run `DELETE /_security/api_key` to invalidate the API keys identified by ID.", + "value": "{\n \"ids\" : [ \"VuaCfGcBCdbkQm-e5aOx\" ]\n}" + }, + "SecurityInvalidateApiKeyRequestExample2": { + "summary": "API keys by name", + "description": "Run `DELETE /_security/api_key` to invalidate the API keys identified by name.", + "value": "{\n \"name\" : \"my-api-key\"\n}" + }, + "SecurityInvalidateApiKeyRequestExample3": { + "summary": "API keys by realm", + "description": "Run `DELETE /_security/api_key` to invalidate all API keys for the `native1` realm.", + "value": "{\n \"realm_name\" : \"native1\"\n}" + }, + "SecurityInvalidateApiKeyRequestExample4": { + "summary": "API keys by user", + "description": "Run `DELETE /_security/api_key` to invalidate all API keys for the user `myuser` in all realms.", + "value": "{\n \"username\" : \"myuser\"\n}" + }, + "SecurityInvalidateApiKeyRequestExample5": { + "summary": "API keys by ID and owner", + "description": "Run `DELETE /_security/api_key` to invalidate the API keys identified by ID if they are owned by the currently authenticated user.", + "value": "{\n \"ids\" : [\"VuaCfGcBCdbkQm-e5aOx\"],\n \"owner\" : \"true\"\n}" + }, + "SecurityInvalidateApiKeyRequestExample6": { + "summary": "API keys by user and realm", + "description": "Run `DELETE /_security/api_key` to invalidate all API keys for the user `myuser` in the `native1` realm.", + "value": "{\n \"username\" : \"myuser\",\n \"realm_name\" : \"native1\"\n}" + } } } }, @@ -30317,6 +31361,12 @@ "invalidated_api_keys", "previously_invalidated_api_keys" ] + }, + "examples": { + "SecurityInvalidateApiKeyResponseExample1": { + "description": "A successful response from `DELETE /_security/api_key`.\n", + "value": "{\n \"invalidated_api_keys\": [ \n \"api-key-id-1\"\n ],\n \"previously_invalidated_api_keys\": [ \n \"api-key-id-2\",\n \"api-key-id-3\"\n ],\n \"error_count\": 2, \n \"error_details\": [ \n {\n \"type\": \"exception\",\n \"reason\": \"error occurred while invalidating api keys\",\n \"caused_by\": {\n \"type\": \"illegal_argument_exception\",\n \"reason\": \"invalid api key id\"\n }\n },\n {\n \"type\": \"exception\",\n \"reason\": \"error occurred while invalidating api keys\",\n \"caused_by\": {\n \"type\": \"illegal_argument_exception\",\n \"reason\": \"invalid api key id\"\n }\n }\n ]\n}" + } } 
} @@ -30359,6 +31409,12 @@ "access", "name" ] + }, + "examples": { + "CreateCrossClusterApiKeyRequestExample1": { + "description": "Run `POST /_security/cross_cluster/api_key` to create a cross-cluster API key.\n", + "value": "{\n \"name\": \"my-cross-cluster-api-key\",\n \"expiration\": \"1d\", \n \"access\": {\n \"search\": [ \n {\n \"names\": [\"logs*\"]\n }\n ],\n \"replication\": [ \n {\n \"names\": [\"archive*\"]\n }\n ]\n },\n \"metadata\": {\n \"description\": \"phase one\",\n \"environment\": {\n \"level\": 1,\n \"trusted\": true,\n \"tags\": [\"dev\", \"staging\"]\n }\n }\n}" + } } } }, @@ -30396,6 +31452,12 @@ "name", "encoded" ] + }, + "examples": { + "CreateCrossClusterApiKeyResponseExample1": { + "description": "A successful response from `POST /_security/service/elastic/fleet-server/credential/token`.\n", + "value": "{\n \"created\": true,\n \"token\": {\n \"name\": \"Jk5J1HgBuyBK5TpDrdo4\",\n \"value\": \"AAEAAWVsYXN0aWM...vZmxlZXQtc2VydmVyL3Rva2VuMTo3TFdaSDZ\"\n }\n}" + } } } } @@ -30535,6 +31597,12 @@ "required": [ "found" ] + }, + "examples": { + "DeleteServiceTokenResponseExample1": { + "description": "A successful response from `DELETE /_security/service/elastic/fleet-server/credential/token/token42`.\n", + "value": "{\n \"found\" : true\n}" + } } } } @@ -30600,6 +31668,12 @@ "required": [ "x509_certificate_chain" ] + }, + "examples": { + "SecurityDelegatePkiRequestExample1": { + "description": "Delegate a one element certificate chain.", + "value": "{\n\"x509_certificate_chain\": [\"MIIDeDCCAmCgAwIBAgIUBzj/nGGKxP2iXawsSquHmQjCJmMwDQYJKoZIhvcNAQELBQAwUzErMCkGA1UEAxMiRWxhc3RpY3NlYXJjaCBUZXN0IEludGVybWVkaWF0ZSBDQTEWMBQGA1UECxMNRWxhc3RpY3NlYXJjaDEMMAoGA1UEChMDb3JnMB4XDTIzMDcxODE5MjkwNloXDTQzMDcxMzE5MjkwNlowSjEiMCAGA1UEAxMZRWxhc3RpY3NlYXJjaCBUZXN0IENsaWVudDEWMBQGA1UECxMNRWxhc3RpY3NlYXJjaDEMMAoGA1UEChMDb3JnMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAllHL4pQkkfwAm/oLkxYYO+r950DEy1bjH+4viCHzNADLCTWO+lOZJVlNx7QEzJE3QGMdif9CCBBxQFMapA7oUFCLq84fPSQQu5AnvvbltVD9nwVtCs+9ZGDjMKsz98RhSLMFIkxdxi6HkQ3Lfa4ZSI4lvba4oo+T/GveazBDS+NgmKyq00EOXt3tWi1G9vEVItommzXWfv0agJWzVnLMldwkPqsw0W7zrpyT7FZS4iLbQADGceOW8fiauOGMkscu9zAnDR/SbWl/chYioQOdw6ndFLn1YIFPd37xL0WsdsldTpn0vH3YfzgLMffT/3P6YlwBegWzsx6FnM/93Ecb4wIDAQABo00wSzAJBgNVHRMEAjAAMB0GA1UdDgQWBBQKNRwjW+Ad/FN1Rpoqme/5+jrFWzAfBgNVHSMEGDAWgBRcya0c0x/PaI7MbmJVIylWgLqXNjANBgkqhkiG9w0BAQsFAAOCAQEACZ3PF7Uqu47lplXHP6YlzYL2jL0D28hpj5lGtdha4Muw1m/BjDb0Pu8l0NQ1z3AP6AVcvjNDkQq6Y5jeSz0bwQlealQpYfo7EMXjOidrft1GbqOMFmTBLpLA9SvwYGobSTXWTkJzonqVaTcf80HpMgM2uEhodwTcvz6v1WEfeT/HMjmdIsq4ImrOL9RNrcZG6nWfw0HR3JNOgrbfyEztEI471jHznZ336OEcyX7gQuvHE8tOv5+oD1d7s3Xg1yuFp+Ynh+FfOi3hPCuaHA+7F6fLmzMDLVUBAllugst1C3U+L/paD7tqIa4ka+KNPCbSfwazmJrt4XNiivPR4hwH5g==\"]\n}" + } } } }, @@ -30725,6 +31799,12 @@ "$ref": "#/components/schemas/security.delete_privileges:FoundStatus" } } + }, + "examples": { + "SecurityDeletePrivilegesResponseExample1": { + "description": "A successful response from `DELETE /_security/privilege/myapp/read`. If the privilege is successfully deleted, `found` is set to `true`.\n", + "value": "{\n \"myapp\": {\n \"read\": {\n \"found\" : true\n }\n }\n}" + } } } } @@ -30852,6 +31932,12 @@ "required": [ "found" ] + }, + "examples": { + "SecurityDeleteRoleResponseExample1": { + "description": "A successful response from `DELETE /_security/role/my_admin_role`. 
If the role is successfully deleted, `found` is set to `true`.\n", + "value": "{\n \"found\" : true\n}" + } } } } @@ -30987,6 +32073,12 @@ "required": [ "found" ] + }, + "examples": { + "SecurityDeleteRoleMappingResponseExample1": { + "description": "A successful response from `DELETE /_security/role_mapping/mapping1`. If the mapping is successfully deleted, `found` is `true`.\n", + "value": "{\n \"found\" : true\n}" + } } } } @@ -31111,6 +32203,12 @@ "required": [ "found" ] + }, + "examples": { + "SecurityDeleteUserResponseExample1": { + "description": "A successful response from `DELETE /_security/user/jacknich`.\n", + "value": "{\n \"found\" : true\n}" + } } } } @@ -31326,6 +32424,12 @@ "token", "http_ca" ] + }, + "examples": { + "ResponseExample1": { + "description": "A successful response from `GET /_security/enroll/kibana`.", + "value": "{\n \"token\" : {\n \"name\" : \"enroll-process-token-1629123923000\", \n \"value\": \"AAEAAWVsYXN0aWM...vZmxlZXQtc2VydmVyL3Rva2VuMTo3TFdaSDZ\" \n },\n \"http_ca\" : \"MIIJlAIBAzVoGCSqGSIb3...vsDfsA3UZBAjEPfhubpQysAICAA=\"\n}" + } } } } @@ -31386,6 +32490,12 @@ "transport_cert", "nodes_addresses" ] + }, + "examples": { + "ResponseExample1": { + "description": "A successful response from `GET /_security/enroll/node`.", + "value": "{\n \"http_ca_key\" : \"MIIJlAIBAzCCCVoGCSqGSIb3DQEHAaCCCUsEgglHMIIJQzCCA98GCSqGSIb3DQ....vsDfsA3UZBAjEPfhubpQysAICCAA=\", \n \"http_ca_cert\" : \"MIIJlAIBAzCCCVoGCSqGSIb3DQEHAaCCCUsEgglHMIIJQzCCA98GCSqGSIb3DQ....vsDfsA3UZBAjEPfhubpQysAICCAA=\", \n \"transport_ca_cert\" : \"MIIJlAIBAzCCCVoGCSqGSIb3DQEHAaCCCUsEgglHMIIJQzCCA98GCSqG....vsDfsA3UZBAjEPfhubpQysAICCAA=\", \n \"transport_key\" : \"MIIEJgIBAzCCA98GCSqGSIb3DQEHAaCCA9AEggPMMIIDyDCCA8QGCSqGSIb3....YuEiOXvqZ6jxuVSQ0CAwGGoA==\", \n \"transport_cert\" : \"MIIEJgIBAzCCA98GCSqGSIb3DQEHAaCCA9AEggPMMIIDyDCCA8QGCSqGSIb3....YuEiOXvqZ6jxuVSQ0CAwGGoA==\", \n \"nodes_addresses\" : [ \n \"192.168.1.2:9300\"\n ]\n}" + } } } } @@ -31440,6 +32550,12 @@ "index", "remote_cluster" ] + }, + "examples": { + "SecurityGetBuiltinPrivilegesResponseExample1": { + "description": "A successful response from `GET /_security/privilege/_builtin`.", + "value": "{\n \"cluster\" : [\n \"all\",\n \"cancel_task\",\n \"create_snapshot\",\n \"cross_cluster_replication\",\n \"cross_cluster_search\",\n \"delegate_pki\",\n \"grant_api_key\",\n \"manage\",\n \"manage_api_key\",\n \"manage_autoscaling\",\n \"manage_behavioral_analytics\",\n \"manage_ccr\",\n \"manage_connector\",\n \"manage_data_frame_transforms\",\n \"manage_data_stream_global_retention\",\n \"manage_enrich\",\n \"manage_ilm\",\n \"manage_index_templates\",\n \"manage_inference\",\n \"manage_ingest_pipelines\",\n \"manage_logstash_pipelines\",\n \"manage_ml\",\n \"manage_oidc\",\n \"manage_own_api_key\",\n \"manage_pipeline\",\n \"manage_rollup\",\n \"manage_saml\",\n \"manage_search_application\",\n \"manage_search_query_rules\",\n \"manage_search_synonyms\",\n \"manage_security\",\n \"manage_service_account\",\n \"manage_slm\",\n \"manage_token\",\n \"manage_transform\",\n \"manage_user_profile\",\n \"manage_watcher\",\n \"monitor\",\n \"monitor_connector\",\n \"monitor_data_frame_transforms\",\n \"monitor_data_stream_global_retention\",\n \"monitor_enrich\",\n \"monitor_inference\",\n \"monitor_ml\",\n \"monitor_rollup\",\n \"monitor_snapshot\",\n \"monitor_stats\",\n \"monitor_text_structure\",\n \"monitor_transform\",\n \"monitor_watcher\",\n \"none\",\n \"post_behavioral_analytics_event\",\n \"read_ccr\",\n \"read_connector_secrets\",\n 
\"read_fleet_secrets\",\n \"read_ilm\",\n \"read_pipeline\",\n \"read_security\",\n \"read_slm\",\n \"transport_client\",\n \"write_connector_secrets\",\n \"write_fleet_secrets\"\n ],\n \"index\" : [\n \"all\",\n \"auto_configure\",\n \"create\",\n \"create_doc\",\n \"create_index\",\n \"cross_cluster_replication\",\n \"cross_cluster_replication_internal\",\n \"delete\",\n \"delete_index\",\n \"index\",\n \"maintenance\",\n \"manage\",\n \"manage_data_stream_lifecycle\",\n \"manage_follow_index\",\n \"manage_ilm\",\n \"manage_leader_index\",\n \"monitor\",\n \"none\",\n \"read\",\n \"read_cross_cluster\",\n \"view_index_metadata\",\n \"write\"\n ],\n \"remote_cluster\" : [\n \"monitor_enrich\",\n \"monitor_stats\"\n ]\n}" + } } } } @@ -31695,6 +32811,12 @@ "tokens", "nodes_credentials" ] + }, + "examples": { + "GetServiceCredentialsResponseExample1": { + "description": "A successful response from `GET /_security/service/elastic/fleet-server/credential`. The response contains all credentials for the `elastic/fleet-server` service account.\n", + "value": "{\n \"service_account\": \"elastic/fleet-server\",\n \"count\": 3,\n \"tokens\": {\n \"token1\": {}, \n \"token42\": {} \n },\n \"nodes_credentials\": { \n \"_nodes\": { \n \"total\": 3,\n \"successful\": 3,\n \"failed\": 0\n },\n \"file_tokens\": { \n \"my-token\": {\n \"nodes\": [ \"node0\", \"node1\" ] \n }\n }\n }\n}" + } } } } @@ -31797,6 +32919,12 @@ "$ref": "#/components/schemas/security._types:SecuritySettings" } } + }, + "examples": { + "SecurityUpdateSettingsRequestExample1": { + "description": "Run `PUT /_security/settings` to modify the security settings.", + "value": "{\n \"security\": {\n \"index.auto_expand_replicas\": \"0-all\"\n },\n \"security-tokens\": {\n \"index.auto_expand_replicas\": \"0-all\"\n },\n \"security-profile\": {\n \"index.auto_expand_replicas\": \"0-all\"\n }\n}" + } } } }, @@ -31863,6 +32991,18 @@ "$ref": "#/components/schemas/_types:Username" } } + }, + "examples": { + "GetUserAccessTokenRequestExample1": { + "summary": "A client_credentials grant type example", + "description": "Run `POST /_security/oauth2/token` to obtain a token using the `client_credentials` grant type, which simply creates a token as the authenticated user.\n", + "value": "{\n \"grant_type\" : \"client_credentials\"\n}" + }, + "GetUserAccessTokenRequestExample2": { + "summary": "A password grant type example", + "description": "Run `POST /_security/oauth2/token` to obtain a token for the `test_admin` user using the password grant type. 
This request needs to be made by an authenticated user with sufficient privileges that may or may not be the same as the one whose username is passed in the `username` parameter.\n", + "value": "{\n \"grant_type\" : \"password\",\n \"username\" : \"test_admin\",\n \"password\" : \"x-pack-test-password\"\n}" + } } } }, @@ -31904,6 +33044,18 @@ "type", "authentication" ] + }, + "examples": { + "GetUserAccessTokenResponseExample1": { + "summary": "A client_credentials grant type example", + "description": "A successful response from `POST /_security/oauth2/token`.", + "value": "{\n \"access_token\" : \"dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==\",\n \"type\" : \"Bearer\",\n \"expires_in\" : 1200,\n \"authentication\" : {\n \"username\" : \"test_admin\",\n \"roles\" : [\n \"superuser\"\n ],\n \"full_name\" : null,\n \"email\" : null,\n \"metadata\" : { },\n \"enabled\" : true,\n \"authentication_realm\" : {\n \"name\" : \"file\",\n \"type\" : \"file\"\n },\n \"lookup_realm\" : {\n \"name\" : \"file\",\n \"type\" : \"file\"\n },\n \"authentication_type\" : \"realm\"\n }\n}" + }, + "GetUserAccessTokenResponseExample2": { + "summary": "A password grant type example", + "description": "A successful response from `POST /_security/oauth2/token`.", + "value": "{\n \"access_token\" : \"dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==\",\n \"type\" : \"Bearer\",\n \"expires_in\" : 1200,\n \"authentication\" : {\n \"username\" : \"test_admin\",\n \"roles\" : [\n \"superuser\"\n ],\n \"full_name\" : null,\n \"email\" : null,\n \"metadata\" : { },\n \"enabled\" : true,\n \"authentication_realm\" : {\n \"name\" : \"file\",\n \"type\" : \"file\"\n },\n \"lookup_realm\" : {\n \"name\" : \"file\",\n \"type\" : \"file\"\n },\n \"authentication_type\" : \"realm\"\n }\n}" + } } } } @@ -31939,6 +33091,33 @@ "$ref": "#/components/schemas/_types:Username" } } + }, + "examples": { + "SecurityInvalidateTokenRequestExample1": { + "summary": "Invalidate an access token", + "description": "Run `DELETE /_security/oauth2/token` to invalidate an access token.\n", + "value": "{\n \"token\" : \"dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==\"\n}" + }, + "SecurityInvalidateTokenRequestExample2": { + "summary": "Invalidate a refresh token", + "description": "Run `DELETE /_security/oauth2/token` to invalidate a refresh token.\n", + "value": "{\n \"refresh_token\" : \"vLBPvmAB6KvwvJZr27cS\"\n}" + }, + "SecurityInvalidateTokenRequestExample3": { + "summary": "Invalidate tokens by realm", + "description": "Run `DELETE /_security/oauth2/token` to invalidate all access tokens and refresh tokens for the `saml1` realm.", + "value": "{\n \"realm_name\" : \"saml1\"\n}" + }, + "SecurityInvalidateTokenRequestExample4": { + "summary": "Invalidate tokens by user", + "description": "Run `DELETE /_security/oauth2/token` to invalidate all access tokens and refresh tokens for the user `myuser` in all realms.", + "value": "{\n \"username\" : \"myuser\"\n}" + }, + "SecurityInvalidateTokenRequestExample5": { + "summary": "Invalidate tokens by user and realm", + "description": "Run `DELETE /_security/oauth2/token` to invalidate all access tokens and refresh tokens for the user `myuser` in the `saml1` realm.", + "value": "{\n \"username\" : \"myuser\",\n \"realm_name\" : \"saml1\"\n}" + } } } }, @@ -31977,6 +33156,12 @@ "invalidated_tokens", "previously_invalidated_tokens" ] + 
}, + "examples": { + "SecurityInvalidateTokenResponseExample1": { + "description": "A partially successful response from `DELETE /_security/oauth2/token`. The response includes the number of the tokens that were invalidated, the number of errors that were encountered when invalidating the tokens, and details about these errors.\n", + "value": "{\n \"invalidated_tokens\":9, \n \"previously_invalidated_tokens\":15, \n \"error_count\":2, \n \"error_details\":[ \n {\n \"type\":\"exception\",\n \"reason\":\"Elasticsearch exception [type=exception, reason=foo]\",\n \"caused_by\":{\n \"type\":\"exception\",\n \"reason\":\"Elasticsearch exception [type=illegal_argument_exception, reason=bar]\"\n }\n },\n {\n \"type\":\"exception\",\n \"reason\":\"Elasticsearch exception [type=exception, reason=boo]\",\n \"caused_by\":{\n \"type\":\"exception\",\n \"reason\":\"Elasticsearch exception [type=illegal_argument_exception, reason=far]\"\n }\n }\n ]\n}" + } } } } @@ -32098,6 +33283,12 @@ "indices", "run_as" ] + }, + "examples": { + "SecurityGetUserPrivilegesResponseExample1": { + "description": "A successful response from `GET /_security/user/_privileges`.", + "value": "{\n \"cluster\" : [\n \"all\"\n ],\n \"global\" : [ ],\n \"indices\" : [\n {\n \"names\" : [\n \"*\"\n ],\n \"privileges\" : [\n \"all\"\n ],\n \"allow_restricted_indices\" : true\n }\n ],\n \"applications\" : [\n {\n \"application\" : \"*\",\n \"privileges\" : [\n \"*\"\n ],\n \"resources\" : [\n \"*\"\n ]\n }\n ],\n \"run_as\" : [\n \"*\"\n ]\n}" + } } } } @@ -32179,6 +33370,23 @@ "required": [ "profiles" ] + }, + "examples": { + "ResponseExample1": { + "summary": "Profile details for a UUID", + "description": "A successful response from `GET /_security/profile/u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0`. 
By default, no content is returned in the `data` field.\n", + "value": "{\n \"profiles\": [\n {\n \"uid\": \"u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0\",\n \"enabled\": true,\n \"last_synchronized\": 1642650651037,\n \"user\": {\n \"username\": \"jacknich\",\n \"roles\": [\n \"admin\", \"other_role1\"\n ],\n \"realm_name\": \"native\",\n \"full_name\": \"Jack Nicholson\",\n \"email\": \"jacknich@example.com\"\n },\n \"labels\": {\n \"direction\": \"north\"\n },\n \"data\": {}, \n \"_doc\": {\n \"_primary_term\": 88,\n \"_seq_no\": 66\n }\n }\n ]\n}" + }, + "ResponseExample2": { + "summary": "Profile details for a UUID and data key", + "description": "A successful response from `GET /_security/profile/u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0?data=app1.key1`.\n", + "value": "{\n \"profiles\": [\n {\n \"uid\": \"u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0\",\n \"enabled\": true,\n \"last_synchronized\": 1642650651037,\n \"user\": {\n \"username\": \"jacknich\",\n \"roles\": [\n \"admin\", \"other_role1\"\n ],\n \"realm_name\": \"native\",\n \"full_name\": \"Jack Nicholson\",\n \"email\": \"jacknich@example.com\"\n },\n \"labels\": {\n \"direction\": \"north\"\n },\n \"data\": {\n \"app1\": {\n \"key1\": \"value1\"\n }\n },\n \"_doc\": {\n \"_primary_term\": 88,\n \"_seq_no\": 66\n }\n }\n ]\n}" + }, + "ResponseExample3": { + "summary": "Profile details with errors", + "description": "A response that contains errors that occurred while retrieving user profiles.\n", + "value": "{\n \"profiles\": [],\n \"errors\": {\n \"count\": 1,\n \"details\": {\n \"u_FmxQt3gr1BBH5wpnz9HkouPj3Q710XkOgg1PWkwLPBW_5\": {\n \"type\": \"resource_not_found_exception\",\n \"reason\": \"profile document not found\"\n }\n }\n }\n}" + } } } } @@ -32225,6 +33433,18 @@ "api_key", "grant_type" ] + }, + "examples": { + "SecurityGrantApiKeyRequestExample1": { + "summary": "Grant an API key", + "description": "Run `POST /_security/api_key/grant` to create an API key on behalf of the `test_admin` user.\n", + "value": "{\n \"grant_type\": \"password\",\n \"username\" : \"test_admin\",\n \"password\" : \"x-pack-test-password\",\n \"api_key\" : {\n \"name\": \"my-api-key\",\n \"expiration\": \"1d\",\n \"role_descriptors\": {\n \"role-a\": {\n \"cluster\": [\"all\"],\n \"indices\": [\n {\n \"names\": [\"index-a*\"],\n \"privileges\": [\"read\"]\n }\n ]\n },\n \"role-b\": {\n \"cluster\": [\"all\"],\n \"indices\": [\n {\n \"names\": [\"index-b*\"],\n \"privileges\": [\"all\"]\n }\n ]\n }\n },\n \"metadata\": {\n \"application\": \"my-application\",\n \"environment\": {\n \"level\": 1,\n \"trusted\": true,\n \"tags\": [\"dev\", \"staging\"]\n }\n }\n }\n}" + }, + "SecurityGrantApiKeyRequestExample2": { + "summary": "Grant an API key with run_as", + "description": "Run `POST /_security/api_key/grant`. The user (`test_admin`) whose credentials are provided can \"run as\" another user (`test_user`). The API key will be granted to the impersonated user (`test_user`).\n", + "value": "{\n \"grant_type\": \"password\",\n \"username\" : \"test_admin\", \n \"password\" : \"x-pack-test-password\", \n \"run_as\": \"test_user\", \n \"api_key\" : {\n \"name\": \"another-api-key\"\n }\n}" + } } } }, @@ -32440,6 +33660,12 @@ "redirect_uri", "state" ] + }, + "examples": { + "RequestExample1": { + "description": "Run `POST /_security/oidc/authenticate` to exchange the response that was returned from the OpenID Connect Provider after a successful authentication for an Elasticsearch access token and refresh token. 
This example is from an authentication that uses the authorization code grant flow.\n", + "value": "{\n \"redirect_uri\" : \"https://oidc-kibana.elastic.co:5603/api/security/oidc/callback?code=jtI3Ntt8v3_XvcLzCFGq&state=4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I\",\n \"state\" : \"4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I\",\n \"nonce\" : \"WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM\",\n \"realm\" : \"oidc1\"\n}" + } } } }, @@ -32476,6 +33702,12 @@ "refresh_token", "type" ] + }, + "examples": { + "ResponseExample1": { + "description": "A successful response from `POST /_security/oidc/authenticate`. It contains the access and refresh tokens that were generated, the token duration (in seconds), and the type.\n", + "value": "{\n \"access_token\" : \"dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==\",\n \"type\" : \"Bearer\",\n \"expires_in\" : 1200,\n \"refresh_token\": \"vLBPvmAB6KvwvJZr27cS\"\n}" + } } } } @@ -32509,6 +33741,12 @@ "required": [ "access_token" ] + }, + "examples": { + "RequestExample1": { + "description": "Run `POST /_security/oidc/logout` to perform the logout.", + "value": "{\n \"token\" : \"dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==\",\n \"refresh_token\": \"vLBPvmAB6KvwvJZr27cS\"\n}" + } } } }, @@ -32530,6 +33768,12 @@ "required": [ "redirect" ] + }, + "examples": { + "ResponseExample1": { + "description": "A successful response from `POST /_security/oidc/logout`, which contains the URI pointing to the End Session Endpoint of the OpenID Connect Provider with all the parameters of the Logout Request as HTTP GET parameters.", + "value": "{\n \"redirect\" : \"https://op-provider.org/logout?id_token_hint=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c&post_logout_redirect_uri=http%3A%2F%2Foidc-kibana.elastic.co%2Floggedout&state=lGYK0EcSLjqH6pkT5EVZjC6eIW5YCGgywj2sxROO\"\n}" + } } } } @@ -32572,6 +33816,23 @@ "type": "string" } } + }, + "examples": { + "RequestExample1": { + "summary": "Prepare with realm", + "description": "Run `POST /_security/oidc/prepare` to generate an authentication request for the OpenID Connect Realm `oidc1`.\n", + "value": "{\n \"realm\" : \"oidc1\"\n}" + }, + "RequestExample2": { + "summary": "Prepare with realm, state, and nonce", + "description": "Run `POST /_security/oidc/prepare` to generate an authentication request for the OpenID Connect Realm `oidc1`, where the values for the `state` and the `nonce` have been generated by the client.\n", + "value": "{\n \"realm\" : \"oidc1\",\n \"state\" : \"lGYK0EcSLjqH6pkT5EVZjC6eIW5YCGgywj2sxROO\",\n \"nonce\" : \"zOBXLJGUooRrbLbQk5YCcyC8AXw3iloynvluYhZ5\"\n}" + }, + "RequestExample3": { + "summary": "Prepare with an issuer", + "description": "Run `POST /_security/oidc/prepare` to generate an authentication request for a third party initiated single sign on. Specify the issuer that should be used for matching the appropriate OpenID Connect Authentication realm.\n", + "value": "{\n \"iss\" : \"http://127.0.0.1:8080\",\n \"login_hint\": \"this_is_an_opaque_string\"\n}" + } } } }, @@ -32605,6 +33866,12 @@ "redirect", "state" ] + }, + "examples": { + "ResponseExample1": { + "description": "A successful response from `POST /_security/oidc/prepare`. 
It contains the URI pointing to the Authorization Endpoint of the OpenID Connect Provider with all the parameters of the Authentication Request as HTTP GET parameters.\n", + "value": "{\n \"redirect\" : \"http://127.0.0.1:8080/c2id-login?scope=openid&response_type=id_token&redirect_uri=https%3A%2F%2Fmy.fantastic.rp%2Fcb&state=4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I&nonce=WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM&client_id=elasticsearch-rp\",\n \"state\" : \"4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I\",\n \"nonce\" : \"WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM\",\n \"realm\" : \"oidc1\"\n}" + } } } } @@ -32785,6 +34052,12 @@ "content", "ids" ] + }, + "examples": { + "RequestExample1": { + "description": "Run `POST /_security/saml/authenticate` to exchange a SAML Response indicating a successful authentication at the SAML IdP for an Elasticsearch access token and refresh token to be used in subsequent requests.\n", + "value": "{\n \"content\" : \"PHNhbWxwOlJlc3BvbnNlIHhtbG5zOnNhbWxwPSJ1cm46b2FzaXM6bmFtZXM6dGM6U0FNTDoyLjA6cHJvdG9jb2wiIHhtbG5zOnNhbWw9InVybjpvYXNpczpuYW1lczp0YzpTQU1MOjIuMD.....\",\n \"ids\" : [\"4fee3b046395c4e751011e97f8900b5273d56685\"]\n}" + } } } }, @@ -32826,6 +34099,12 @@ "refresh_token", "realm" ] + }, + "examples": { + "ResponseExample1": { + "description": "A successful response from `POST /_security/saml/authenticate`.", + "value": "{\n \"access_token\" : \"46ToAxZVaXVVZTVKOVF5YU04ZFJVUDVSZlV3\",\n \"username\" : \"Bearer\",\n \"expires_in\" : 1200,\n \"refresh_token\": \"mJdXLtmvTUSpoLwMvdBt_w\",\n \"realm\": \"saml1\"\n}" + } } } } @@ -32871,6 +34150,18 @@ "realm", "ids" ] + }, + "examples": { + "RequestExample1": { + "summary": "HTTP-Redirect binding", + "description": "Run `POST /_security/saml/complete_logout` to verify the logout response sent by the SAML IdP using the HTTP-Redirect binding.\n", + "value": "{\n \"realm\": \"saml1\",\n \"ids\": [ \"_1c368075e0b3...\" ],\n \"query_string\": \"SAMLResponse=fZHLasMwEEVbfb1bf...&SigAlg=http%3A%2F%2Fwww.w3.org%2F2000%2F09%2Fxmldsig%23rsa-sha1&Signature=CuCmFn%2BLqnaZGZJqK...\"\n}" + }, + "RequestExample2": { + "summary": "HTTP-Post binding", + "description": "Run `POST /_security/saml/complete_logout` to verify the logout response sent by the SAML IdP using the HTTP-Post binding.\n", + "value": "{\n \"realm\": \"saml1\",\n \"ids\": [ \"_1c368075e0b3...\" ],\n \"content\": \"PHNhbWxwOkxvZ291dFJlc3BvbnNlIHhtbG5zOnNhbWxwPSJ1cm46...\"\n}" + } } } }, @@ -32920,6 +34211,12 @@ "required": [ "query_string" ] + }, + "examples": { + "RequestExample1": { + "description": "Run `POST /_security/saml/invalidate` to invalidate all the tokens for realm `saml1` pertaining to the user that is identified in the SAML Logout Request.\n", + "value": "{\n \"query_string\" : \"SAMLRequest=nZFda4MwFIb%2FiuS%2BmviRpqFaClKQdbvo2g12M2KMraCJ9cRR9utnW4Wyi13sMie873MeznJ1aWrnS3VQGR0j4mLkKC1NUeljjA77zYyhVbIE0dR%2By7fmaHq7U%2BdegXWGpAZ%2B%2F4pR32luBFTAtWgUcCv56%2Fp5y30X87Yz1khTIycdgpUW9kY7WdsC9zxoXTvMvWuVV98YyMnSGH2SYE5pwALBIr9QKiwDGpW0oGVUznGeMyJZKFkQ4jBf5HnhUymjIhzCAL3KNFihbYx8TBYzzGaY7EnIyZwHzCWMfiDnbRIftkSjJr%2BFu0e9v%2B0EgOquRiiZjKpiVFp6j50T4WXoyNJ%2FEWC9fdqc1t%2F1%2B2F3aUpjzhPiXpqMz1%2FHSn4A&SigAlg=http%3A%2F%2Fwww.w3.org%2F2001%2F04%2Fxmldsig-more%23rsa-sha256&Signature=MsAYz2NFdovMG2mXf6TSpu5vlQQyEJAg%2B4KCwBqJTmrb3yGXKUtIgvjqf88eCAK32v3eN8vupjPC8LglYmke1ZnjK0%2FKxzkvSjTVA7mMQe2AQdKbkyC038zzRq%2FYHcjFDE%2Bz0qISwSHZY2NyLePmwU7SexEXnIz37jKC6NMEhus%3D\",\n \"realm\" : \"saml1\"\n}" + } } } }, @@ -32951,6 +34248,12 @@ "realm", "redirect" 
] + }, + "examples": { + "ResponseExample1": { + "description": "A successful response from `POST /_security/saml/invalidate`.", + "value": "{\n \"redirect\" : \"https://my-idp.org/logout/SAMLResponse=....\",\n \"invalidated\" : 2,\n \"realm\" : \"saml1\"\n}" + } } } } @@ -32988,6 +34291,12 @@ "required": [ "token" ] + }, + "examples": { + "RequestExample1": { + "description": "Run `POST /_security/saml/logout` to invalidate the pair of tokens that were generated by calling the SAML authenticate API with a successful SAML response.\n", + "value": "{\n \"token\" : \"46ToAxZVaXVVZTVKOVF5YU04ZFJVUDVSZlV3\",\n \"refresh_token\" : \"mJdXLtmvTUSpoLwMvdBt_w\"\n}" + } } } }, @@ -33009,6 +34318,12 @@ "required": [ "redirect" ] + }, + "examples": { + "ResponseExample1": { + "description": "A successful response from `POST /_security/saml/logout`.", + "value": "{\n \"redirect\" : \"https://my-idp.org/logout/SAMLRequest=....\"\n}" + } } } } @@ -33047,6 +34362,18 @@ "type": "string" } } + }, + "examples": { + "RequestExample1": { + "summary": "Prepare with a realm", + "description": "Run `POST /_security/saml/prepare` to generate a SAML authentication request for the SAML realm named `saml1`.\n", + "value": "{\n \"realm\" : \"saml1\"\n}" + }, + "RequestExample2": { + "summary": "Prepare with an ACS", + "description": "Run `POST /_security/saml/prepare` to generate a SAML authentication request for the SAML realm with an Assertion Consuming Service (ACS) URL.\n", + "value": "{\n \"acs\" : \"https://kibana.org/api/security/saml/callback\"\n}" + } } } }, @@ -33077,6 +34404,12 @@ "realm", "redirect" ] + }, + "examples": { + "ResponseExample1": { + "description": "A successful response from `POST /_security/saml/prepare`.", + "value": "{\n \"redirect\": \"https://my-idp.org/login?SAMLRequest=fVJdc6IwFP0rmbwDgUKLGbFDtc462%2B06FX3Yl50rBJsKCZsbrPbXL6J22hdfk%2FNx7zl3eL%2BvK7ITBqVWCfVdRolQuS6k2iR0mU2dmN6Phgh1FTQ8be2rehH%2FWoGWdESF%2FPST0NYorgElcgW1QG5zvkh%2FPfHAZbwx2upcV5SkiMLYzmqsFba1MAthdjIXy5enhL5a23DPOyo6W7kGBa7cwhZ2gO7G8OiW%2BR400kORt0bag7fzezAlk24eqcD2OxxlsNN5O3MdsW9c6CZnbq7rntF4d3s0D7BaHTZhIWN52P%2BcjiuGRbDU6cdj%2BEjJbJLQv4N4ADdhxBiEZbQuWclY4Q8iABbCXczCdSiKMAC%2FgyO2YqbQgrIJDZg%2FcFjsMD%2Fzb3gUcBa5sR%2F9oWR%2BzuJBqlPG14Jbn0DIf2TZ3Jn%2FXmSUrC5ddQB6bob37uZrJdeF4dIDHV3iuhb70Ptq83kOz53ubDLXlcwPJK0q%2FT42AqxIaAkVCkqm2tRgr49yfJGFU%2FZQ3hy3QyuUpd7obPv97kb%2FAQ%3D%3D\",\n \"realm\": \"saml1\",\n \"id\": \"_989a34500a4f5bf0f00d195aa04a7804b4ed42a1\"\n}" + } } } } @@ -33122,6 +34455,12 @@ "required": [ "metadata" ] + }, + "examples": { + "ResponseExample1": { + "description": "A successful response from `POST /_security/profile/u_P_0BMHgaOK3p7k-PFWUCbw9dQ-UFjt01oWJ_Dp2PmPc_0/_data`, which indicates that the request is acknowledged.\n", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -33217,6 +34556,18 @@ "$ref": "#/components/schemas/_types:Duration" } } + }, + "examples": { + "RequestExample1": { + "summary": "Update role and metadata", + "description": "Run `PUT /_security/api_key/VuaCfGcBCdbkQm-e5aOx` to assign new role descriptors and metadata to an API key.\n", + "value": "{\n \"role_descriptors\": {\n \"role-a\": {\n \"indices\": [\n {\n \"names\": [\"*\"],\n \"privileges\": [\"write\"]\n }\n ]\n }\n },\n \"metadata\": {\n \"environment\": {\n \"level\": 2,\n \"trusted\": true,\n \"tags\": [\"production\"]\n }\n }\n}" + }, + "RequestExample2": { + "summary": "Remove permissions", + "description": "Run `PUT /_security/api_key/VuaCfGcBCdbkQm-e5aOx` to remove the API key's previously assigned 
permissions. It will inherit the owner user's full permissions.\n", + "value": "{\n \"role_descriptors\": {}\n}" + } } } } @@ -33237,6 +34588,13 @@ "required": [ "updated" ] + }, + "examples": { + "ResponseExample1": { + "summary": "Update role and metadata", + "description": "A successful response from `PUT /_security/api_key/VuaCfGcBCdbkQm-e5aOx`. The API key's effective permissions after the update will be the intersection of the supplied role descriptors and the owner user's permissions.\n", + "value": "{\n \"updated\": true\n}" + } } } } @@ -33288,6 +34646,12 @@ "required": [ "access" ] + }, + "examples": { + "UpdateCrossClusterApiKeyRequestExample1": { + "description": "Run `PUT /_security/cross_cluster/api_key/VuaCfGcBCdbkQm-e5aOx` to update a cross-cluster API key, assigning it new access scope and metadata.\n", + "value": "{\n \"access\": {\n \"replication\": [\n {\n \"names\": [\"archive\"]\n }\n ]\n },\n \"metadata\": {\n \"application\": \"replication\"\n }\n}" + } } } }, @@ -33309,6 +34673,12 @@ "required": [ "updated" ] + }, + "examples": { + "UpdateCrossClusterApiKeyResponseExample1": { + "description": "A successful response from `PUT /_security/cross_cluster/api_key/VuaCfGcBCdbkQm-e5aOx` that indicates that the API key was updated.\n", + "value": "{\n \"updated\": true\n}" + } } } } @@ -33469,6 +34839,12 @@ "type", "reason" ] + }, + "examples": { + "ShutdownPutNodeRequestExample1": { + "description": "Register a node for shutdown with `PUT /_nodes/USpTGYaBSIKbgSUJR2Z9lg/shutdown`. The `restart` type prepares the node to be restarted.\n", + "value": "{\n \"type\": \"restart\",\n \"reason\": \"Demonstrating how the node shutdown API works\",\n \"allocation_delay\": \"20m\"\n}" + } } } }, @@ -33535,6 +34911,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "ShutdownDeleteNodeResponseExample1": { + "description": "A successful response from `DELETE /_nodes/USpTGYaBSIKbgSUJR2Z9lg/shutdown`.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -33751,6 +35133,18 @@ "$ref": "#/components/schemas/watcher._types:CronExpression" } } + }, + "examples": { + "PutSnapshotLifecycleRequestExample1": { + "summary": "Create a policy", + "description": "Run `PUT /_slm/policy/daily-snapshots` to create a lifecycle policy. The `schedule` is when the snapshot should be taken, in this case, 1:30am daily. The `retention` details specify to: keep snapshots for 30 days; always keep at least 5 successful snapshots, even if they're more than 30 days old; keep no more than 50 successful snapshots, even if they're less than 30 days old.\n", + "value": "{\n \"schedule\": \"0 30 1 * * ?\",\n \"name\": \"<daily-snap-{now/d}>\",\n \"repository\": \"my_repository\",\n \"config\": {\n \"indices\": [\"data-*\", \"important\"],\n \"ignore_unavailable\": false,\n \"include_global_state\": false\n },\n \"retention\": {\n \"expire_after\": \"30d\",\n \"min_count\": 5,\n \"max_count\": 50\n }\n}" + }, + "PutSnapshotLifecycleRequestExample2": { + "summary": "Create a policy with intervals", + "description": "Run `PUT /_slm/policy/hourly-snapshots` to create a lifecycle policy that uses interval scheduling. It creates a snapshot once every hour. 
The first snapshot will be created one hour after the policy is modified, with subsequent snapshots every hour afterward.\n", + "value": "{\n \"schedule\": \"1h\",\n \"name\": \"<hourly-snap-{now/d}>\",\n \"repository\": \"my_repository\",\n \"config\": {\n \"indices\": [\"data-*\", \"important\"]\n }\n}" + } } } } @@ -33880,6 +35274,12 @@ "required": [ "snapshot_name" ] + }, + "examples": { + "ExecuteSnapshotLifecycleResponseExample1": { + "description": "A successful response from `POST /_slm/policy/daily-snapshots/_execute`, which takes an immediate snapshot according to the `daily-snapshots` policy.\n", + "value": "{\n \"snapshot_name\": \"daily-snap-2019.04.24-gwrqoo2xtea3q57vvg0uea\"\n}" + } } } } @@ -34041,6 +35441,12 @@ "total_snapshots_taken", "policy_stats" ] + }, + "examples": { + "GetSnapshotLifecycleManagementStatsResponseExample1": { + "description": "A successful response from `GET /_slm/stats`.", + "value": "{\n \"retention_runs\": 13,\n \"retention_failed\": 0,\n \"retention_timed_out\": 0,\n \"retention_deletion_time\": \"1.4s\",\n \"retention_deletion_time_millis\": 1404,\n \"policy_stats\": [ ],\n \"total_snapshots_taken\": 1,\n \"total_snapshots_failed\": 1,\n \"total_snapshots_deleted\": 0,\n \"total_snapshot_deletion_failures\": 0\n}" + } } } } @@ -34093,6 +35499,12 @@ "required": [ "operation_mode" ] + }, + "examples": { + "GetSnapshotLifecycleManagementStatusResponseExample1": { + "description": "A successful response from `GET _slm/status`.", + "value": "{\n \"operation_mode\": \"RUNNING\"\n}" + } } } } @@ -34138,6 +35550,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "StartSnapshotLifecycleManagementResponseExample1": { + "description": "A successful response from `POST _slm/start`.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -34250,6 +35668,12 @@ "required": [ "results" ] + }, + "examples": { + "SnapshotCleanupRepositoryResponseExample1": { + "description": "A successful response from `POST /_snapshot/my_repository/_cleanup`.", + "value": "{\n \"results\": {\n \"deleted_bytes\": 20,\n \"deleted_blobs\": 5\n }\n}" + } } } } @@ -34335,6 +35759,12 @@ "required": [ "indices" ] + }, + "examples": { + "SnapshotCloneRequestExample1": { + "description": "Run `PUT /_snapshot/my_repository/source_snapshot/_clone/target_snapshot` to clone the `source_snapshot` into a new `target_snapshot`.", + "value": "{\n \"indices\": \"index_a,index_b\"\n}" + } } } }, @@ -34554,6 +35984,12 @@ "remaining", "total" ] + }, + "examples": { + "SnapshotGetResponseExample1": { + "description": "A successful response from `GET /_snapshot/my_repository/snapshot_*?sort=start_time&from_sort_value=1577833200000`. 
The response contains information for all snapshots with names starting with `snapshot_` and that started on or after timestamp `1577833200000` (Jan 1st 2020) when sorted by snapshot start time in the default ascending order.\n", + "value": "{\n \"snapshots\": [\n {\n \"snapshot\": \"snapshot_1\",\n \"uuid\": \"dKb54xw67gvdRctLCxSket\",\n \"repository\": \"my_repository\",\n \"version_id\": <version_id>,\n \"version\": <version>,\n \"indices\": [],\n \"data_streams\": [],\n \"feature_states\": [],\n \"include_global_state\": true,\n \"state\": \"SUCCESS\",\n \"start_time\": \"2020-07-06T21:55:18.128Z\",\n \"start_time_in_millis\": 1593093628849,\n \"end_time\": \"2020-07-06T21:55:18.129Z\",\n \"end_time_in_millis\": 1593093628850,\n \"duration_in_millis\": 1,\n \"failures\": [],\n \"shards\": {\n \"total\": 0,\n \"failed\": 0,\n \"successful\": 0\n }\n },\n {\n \"snapshot\": \"snapshot_2\",\n \"uuid\": \"vdRctLCxSketdKb54xw67g\",\n \"repository\": \"my_repository\",\n \"version_id\": <version_id>,\n \"version\": <version>,\n \"indices\": [],\n \"data_streams\": [],\n \"feature_states\": [],\n \"include_global_state\": true,\n \"state\": \"SUCCESS\",\n \"start_time\": \"2020-07-06T21:55:18.130Z\",\n \"start_time_in_millis\": 1593093628851,\n \"end_time\": \"2020-07-06T21:55:18.130Z\",\n \"end_time_in_millis\": 1593093628851,\n \"duration_in_millis\": 0,\n \"failures\": [],\n \"shards\": {\n \"total\": 0,\n \"failed\": 0,\n \"successful\": 0\n }\n },\n {\n \"snapshot\": \"snapshot_3\",\n \"uuid\": \"dRctdKb54xw67gvLCxSket\",\n \"repository\": \"my_repository\",\n \"version_id\": <version_id>,\n \"version\": <version>,\n \"indices\": [],\n \"data_streams\": [],\n \"feature_states\": [],\n \"include_global_state\": true,\n \"state\": \"SUCCESS\",\n \"start_time\": \"2020-07-06T21:55:18.131Z\",\n \"start_time_in_millis\": 1593093628852,\n \"end_time\": \"2020-07-06T21:55:18.135Z\",\n \"end_time_in_millis\": 1593093628856,\n \"duration_in_millis\": 4,\n \"failures\": [],\n \"shards\": {\n \"total\": 0,\n \"failed\": 0,\n \"successful\": 0\n }\n }\n ],\n \"total\": 3,\n \"remaining\": 0\n}" + } } } } @@ -34676,6 +36112,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "SnapshotDeleteResponseExample1": { + "description": "A successful response from `DELETE /_snapshot/my_repository/snapshot_2,snapshot_3`. The request deletes `snapshot_2` and `snapshot_3` from the repository named `my_repository`.\n", + "value": "{\n \"acknowledged\" : true\n}" + } } } } @@ -35336,6 +36778,18 @@ "type": "string" } } + }, + "examples": { + "SnapshotRestoreRequestExample1": { + "summary": "Restore with rename pattern", + "description": "Run `POST /_snapshot/my_repository/snapshot_2/_restore?wait_for_completion=true`. It restores `index_1` and `index_2` from `snapshot_2`. The `rename_pattern` and `rename_replacement` parameters indicate that any index matching the regular expression `index_(.+)` will be renamed using the pattern `restored_index_$1`. For example, `index_1` will be renamed to `restored_index_1`.\n", + "value": "{\n \"indices\": \"index_1,index_2\",\n \"ignore_unavailable\": true,\n \"include_global_state\": false,\n \"rename_pattern\": \"index_(.+)\",\n \"rename_replacement\": \"restored_index_$1\",\n \"include_aliases\": false\n}" + }, + "SnapshotRestoreRequestExample2": { + "summary": "Restore in-place", + "description": "Close `index_1` then run `POST /_snapshot/my_repository/snapshot_2/_restore?wait_for_completion=true` to restore an index in-place. 
For example, you might want to perform this type of restore operation when the cluster allocation explain API reports `no_valid_shard_copy` and no alternative recovery options remain.\n", + "value": "{\n \"indices\": \"index_1\"\n}" + } } } } @@ -35536,6 +36990,12 @@ "required": [ "cursor" ] + }, + "examples": { + "ClearSqlCursorRequestExample1": { + "description": "Run `POST _sql/close` to clear an SQL search cursor.", + "value": "{\n \"cursor\": \"sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWYUpOYklQMHhRUEtld3RsNnFtYU1hQQ==:BAFmBGRhdGUBZgVsaWtlcwFzB21lc3NhZ2UBZgR1c2Vy9f///w8=\"\n}" + } } } }, @@ -35882,6 +37342,12 @@ "items": { "$ref": "#/components/schemas/ssl.certificates:CertificateInformation" } + }, + "examples": { + "GetCertificatesResponseExample1": { + "description": "A successful response from `GET /_ssl/certificates`, which provides information about the certificates on a single node of Elasticsearch.\n", + "value": "[\n {\n \"path\": \"certs/elastic-certificates.p12\",\n \"format\": \"PKCS12\",\n \"alias\": \"instance\",\n \"subject_dn\": \"CN=Elastic Certificate Tool Autogenerated CA\",\n \"serial_number\": \"a20f0ee901e8f69dc633ff633e5cd5437cdb4137\",\n \"has_private_key\": false,\n \"expiry\": \"2021-01-15T20:42:49.000Z\"\n },\n {\n \"path\": \"certs/elastic-certificates.p12\",\n \"format\": \"PKCS12\",\n \"alias\": \"ca\",\n \"subject_dn\": \"CN=Elastic Certificate Tool Autogenerated CA\",\n \"serial_number\": \"a20f0ee901e8f69dc633ff633e5cd5437cdb4137\",\n \"has_private_key\": false,\n \"expiry\": \"2021-01-15T20:42:49.000Z\"\n },\n {\n \"path\": \"certs/elastic-certificates.p12\",\n \"format\": \"PKCS12\",\n \"alias\": \"instance\",\n \"subject_dn\": \"CN=instance\",\n \"serial_number\": \"fc1905e1494dc5230218d079c47a617088f84ce0\",\n \"has_private_key\": true,\n \"expiry\": \"2021-01-15T20:44:32.000Z\"\n }\n]" + } } } } @@ -35954,6 +37420,12 @@ "count", "synonyms_set" ] + }, + "examples": { + "SynonymsGetResponseExample1": { + "description": "A successful response from `GET _synonyms/my-synonyms-set`.", + "value": "{\n \"count\": 3,\n \"synonyms_set\": [\n {\n \"id\": \"test-1\",\n \"synonyms\": \"hello, hi\"\n },\n {\n \"id\": \"test-2\",\n \"synonyms\": \"bye, goodbye\"\n },\n {\n \"id\": \"test-3\",\n \"synonyms\": \"test => check\"\n }\n ]\n}" + } } } } @@ -36110,6 +37582,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/synonyms._types:SynonymRuleRead" + }, + "examples": { + "SynonymRuleGetResponseExample1": { + "description": "A successful response from `GET _synonyms/my-synonyms-set/test-1`.", + "value": "{\n \"id\": \"test-1\",\n \"synonyms\": \"hello, hi\"\n}" + } } } } @@ -36161,6 +37639,13 @@ "required": [ "synonyms" ] + }, + "examples": { + "SynonymRulePutRequestExample1": { + "summary": "Update a synonym rule", + "description": "Run `PUT _synonyms/my-synonyms-set/test-1` to update an existing synonym rule with new synonyms.\n", + "value": "{\n \"synonyms\": \"hello, hi, howdy\"\n}" + } } } }, @@ -36173,6 +37658,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/synonyms._types:SynonymsUpdateResult" + }, + "examples": { + "SynonymRuleResponseExample1": { + "description": "A successful response from `PUT _synonyms/my-synonyms-set/test-1`.\n", + "value": "{\n \"result\": \"updated\",\n \"reload_analyzers_details\": {\n \"_shards\": {\n \"total\": 2,\n \"successful\": 1,\n \"failed\": 0\n },\n \"reload_details\": [\n {\n \"index\": \"test-index\",\n \"reloaded_analyzers\": [\n \"my_search_analyzer\"\n ],\n \"reloaded_node_ids\": [\n \"1wYFZzq8Sxeu_Jvt9mlbkg\"\n ]\n }\n ]\n }\n}" + } } } } @@ -36218,6 +37709,12 @@ 
"application/json": { "schema": { "$ref": "#/components/schemas/synonyms._types:SynonymsUpdateResult" + }, + "examples": { + "SynonymRuleDeleteResponseExample1": { + "description": "A successful response from `DELETE _synonyms/my-synonyms-set/test-1`. All analyzers using this synonyms set will be reloaded automatically to reflect the rule being deleted.\n", + "value": "{\n \"result\": \"deleted\",\n \"reload_analyzers_details\": {\n \"_shards\": {\n \"total\": 2,\n \"successful\": 1,\n \"failed\": 0\n },\n \"reload_details\": [\n {\n \"index\": \"test-index\",\n \"reloaded_analyzers\": [\n \"my_search_analyzer\"\n ],\n \"reloaded_node_ids\": [\n \"1wYFZzq8Sxeu_Jvt9mlbkg\"\n ]\n }\n ]\n }\n}" + } } } } @@ -36280,6 +37777,12 @@ "count", "results" ] + }, + "examples": { + "SynonymsSetsGetResponseExample1": { + "description": "A successful response from `GET _synonyms`.", + "value": "{\n \"count\": 3,\n \"results\": [\n {\n \"synonyms_set\": \"ecommerce-synonyms\",\n \"count\": 2\n },\n {\n \"synonyms_set\": \"my-synonyms-set\",\n \"count\": 3\n },\n {\n \"synonyms_set\": \"new-ecommerce-synonyms\",\n \"count\": 1\n }\n ]\n}" + } } } } @@ -37028,6 +38531,12 @@ "num_messages_analyzed", "sample_start" ] + }, + "examples": { + "FindFieldStructureResponseExample1": { + "description": "A successful response from `GET _text_structure/find_field_structure?index=test-logs&field=message`.", + "value": "{\n \"num_lines_analyzed\" : 22,\n \"num_messages_analyzed\" : 22,\n \"sample_start\" : \"[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128\\n[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-url]\\n\",\n \"charset\" : \"UTF-8\",\n \"format\" : \"semi_structured_text\",\n \"multiline_start_pattern\" : \"^\\\\[\\\\b\\\\d{4}-\\\\d{2}-\\\\d{2}[T ]\\\\d{2}:\\\\d{2}\",\n \"grok_pattern\" : \"\\\\[%{TIMESTAMP_ISO8601:timestamp}\\\\]\\\\[%{LOGLEVEL:loglevel} \\\\]\\\\[.*\",\n \"ecs_compatibility\" : \"disabled\",\n \"timestamp_field\" : \"timestamp\",\n \"joda_timestamp_formats\" : [\n \"ISO8601\"\n ],\n \"java_timestamp_formats\" : [\n \"ISO8601\"\n ],\n \"need_client_timezone\" : true,\n \"mappings\" : {\n \"properties\" : {\n \"@timestamp\" : {\n \"type\" : \"date\"\n },\n \"loglevel\" : {\n \"type\" : \"keyword\"\n },\n \"message\" : {\n \"type\" : \"text\"\n }\n }\n },\n \"ingest_pipeline\" : {\n \"description\" : \"Ingest pipeline created by text structure finder\",\n \"processors\" : [\n {\n \"grok\" : {\n \"field\" : \"message\",\n \"patterns\" : [\n \"\\\\[%{TIMESTAMP_ISO8601:timestamp}\\\\]\\\\[%{LOGLEVEL:loglevel} \\\\]\\\\[.*\"\n ],\n \"ecs_compatibility\" : \"disabled\"\n }\n },\n {\n \"date\" : {\n \"field\" : \"timestamp\",\n \"timezone\" : \"{{ event.timezone }}\",\n \"formats\" : [\n \"ISO8601\"\n ]\n }\n },\n {\n \"remove\" : {\n \"field\" : \"timestamp\"\n }\n }\n ]\n },\n \"field_stats\" : {\n \"loglevel\" : {\n \"count\" : 22,\n \"cardinality\" : 1,\n \"top_hits\" : [\n {\n \"value\" : \"INFO\",\n \"count\" : 22\n }\n ]\n },\n \"message\" : {\n \"count\" : 22,\n \"cardinality\" : 22,\n \"top_hits\" : [\n {\n \"value\" : \"[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128\",\n \"count\" : 1\n },\n {\n \"value\" : \"[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-url]\",\n \"count\" : 1\n },\n {\n \"value\" : 
\"[2024-03-05T10:52:41,042][INFO ][o.e.p.PluginsService ] [laptop] loaded module [rest-root]\",\n \"count\" : 1\n },\n {\n \"value\" : \"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [ingest-user-agent]\",\n \"count\" : 1\n },\n {\n \"value\" : \"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-core]\",\n \"count\" : 1\n },\n {\n \"value\" : \"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-redact]\",\n \"count\" : 1\n },\n {\n \"value\" : \"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-painless]]\",\n \"count\" : 1\n },\n {\n \"value\" : \"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-s3]\",\n \"count\" : 1\n },\n {\n \"value\" : \"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-analytics]\",\n \"count\" : 1\n },\n {\n \"value\" : \"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-autoscaling]\",\n \"count\" : 1\n }\n ]\n },\n \"timestamp\" : {\n \"count\" : 22,\n \"cardinality\" : 14,\n \"earliest\" : \"2024-03-05T10:52:36,256\",\n \"latest\" : \"2024-03-05T10:52:49,199\",\n \"top_hits\" : [\n {\n \"value\" : \"2024-03-05T10:52:41,044\",\n \"count\" : 6\n },\n {\n \"value\" : \"2024-03-05T10:52:41,043\",\n \"count\" : 3\n },\n {\n \"value\" : \"2024-03-05T10:52:41,059\",\n \"count\" : 2\n },\n {\n \"value\" : \"2024-03-05T10:52:36,256\",\n \"count\" : 1\n },\n {\n \"value\" : \"2024-03-05T10:52:41,038\",\n \"count\" : 1\n },\n {\n \"value\" : \"2024-03-05T10:52:41,042\",\n \"count\" : 1\n },\n {\n \"value\" : \"2024-03-05T10:52:43,291\",\n \"count\" : 1\n },\n {\n \"value\" : \"2024-03-05T10:52:46,098\",\n \"count\" : 1\n },\n {\n \"value\" : \"2024-03-05T10:52:47,227\",\n \"count\" : 1\n },\n {\n \"value\" : \"2024-03-05T10:52:47,259\",\n \"count\" : 1\n }\n ]\n }\n }\n}" + } } } } @@ -37307,6 +38816,12 @@ "items": { "type": "object" } + }, + "examples": { + "FindStructureRequestExample1": { + "description": "Run `POST _text_structure/find_structure` to analyze newline-delimited JSON text.", + "value": "{\"name\": \"Leviathan Wakes\", \"author\": \"James S.A. Corey\", \"release_date\": \"2011-06-02\", \"page_count\": 561}\n{\"name\": \"Hyperion\", \"author\": \"Dan Simmons\", \"release_date\": \"1989-05-26\", \"page_count\": 482}\n{\"name\": \"Dune\", \"author\": \"Frank Herbert\", \"release_date\": \"1965-06-01\", \"page_count\": 604}\n{\"name\": \"Dune Messiah\", \"author\": \"Frank Herbert\", \"release_date\": \"1969-10-15\", \"page_count\": 331}\n{\"name\": \"Children of Dune\", \"author\": \"Frank Herbert\", \"release_date\": \"1976-04-21\", \"page_count\": 408}\n{\"name\": \"God Emperor of Dune\", \"author\": \"Frank Herbert\", \"release_date\": \"1981-05-28\", \"page_count\": 454}\n{\"name\": \"Consider Phlebas\", \"author\": \"Iain M. Banks\", \"release_date\": \"1987-04-23\", \"page_count\": 471}\n{\"name\": \"Pandora's Star\", \"author\": \"Peter F. 
Hamilton\", \"release_date\": \"2004-03-02\", \"page_count\": 768}\n{\"name\": \"Revelation Space\", \"author\": \"Alastair Reynolds\", \"release_date\": \"2000-03-15\", \"page_count\": 585}\n{\"name\": \"A Fire Upon the Deep\", \"author\": \"Vernor Vinge\", \"release_date\": \"1992-06-01\", \"page_count\": 613}\n{\"name\": \"Ender's Game\", \"author\": \"Orson Scott Card\", \"release_date\": \"1985-06-01\", \"page_count\": 324}\n{\"name\": \"1984\", \"author\": \"George Orwell\", \"release_date\": \"1985-06-01\", \"page_count\": 328}\n{\"name\": \"Fahrenheit 451\", \"author\": \"Ray Bradbury\", \"release_date\": \"1953-10-15\", \"page_count\": 227}\n{\"name\": \"Brave New World\", \"author\": \"Aldous Huxley\", \"release_date\": \"1932-06-01\", \"page_count\": 268}\n{\"name\": \"Foundation\", \"author\": \"Isaac Asimov\", \"release_date\": \"1951-06-01\", \"page_count\": 224}\n{\"name\": \"The Giver\", \"author\": \"Lois Lowry\", \"release_date\": \"1993-04-26\", \"page_count\": 208}\n{\"name\": \"Slaughterhouse-Five\", \"author\": \"Kurt Vonnegut\", \"release_date\": \"1969-06-01\", \"page_count\": 275}\n{\"name\": \"The Hitchhiker's Guide to the Galaxy\", \"author\": \"Douglas Adams\", \"release_date\": \"1979-10-12\", \"page_count\": 180}\n{\"name\": \"Snow Crash\", \"author\": \"Neal Stephenson\", \"release_date\": \"1992-06-01\", \"page_count\": 470}\n{\"name\": \"Neuromancer\", \"author\": \"William Gibson\", \"release_date\": \"1984-07-01\", \"page_count\": 271}\n{\"name\": \"The Handmaid's Tale\", \"author\": \"Margaret Atwood\", \"release_date\": \"1985-06-01\", \"page_count\": 311}\n{\"name\": \"Starship Troopers\", \"author\": \"Robert A. Heinlein\", \"release_date\": \"1959-12-01\", \"page_count\": 335}\n{\"name\": \"The Left Hand of Darkness\", \"author\": \"Ursula K. Le Guin\", \"release_date\": \"1969-06-01\", \"page_count\": 304}\n{\"name\": \"The Moon is a Harsh Mistress\", \"author\": \"Robert A. Heinlein\", \"release_date\": \"1966-04-01\", \"page_count\": 288}" + } } } }, @@ -37425,6 +38940,12 @@ "num_lines_analyzed", "ingest_pipeline" ] + }, + "examples": { + "FindStructureResponseExample1": { + "description": "A successful response from `POST _text_structure/find_structure`.", + "value": "{\n \"num_lines_analyzed\" : 24,\n \"num_messages_analyzed\" : 24,\n \"sample_start\" : \"{\\\"name\\\": \\\"Leviathan Wakes\\\", \\\"author\\\": \\\"James S.A. 
Corey\\\", \\\"release_date\\\": \\\"2011-06-02\\\", \\\"page_count\\\": 561}\\n{\\\"name\\\": \\\"Hyperion\\\", \\\"author\\\": \\\"Dan Simmons\\\", \\\"release_date\\\": \\\"1989-05-26\\\", \\\"page_count\\\": 482}\\n\",\n \"charset\" : \"UTF-8\",\n \"has_byte_order_marker\" : false,\n \"format\" : \"ndjson\",\n \"ecs_compatibility\" : \"disabled\",\n \"timestamp_field\" : \"release_date\",\n \"joda_timestamp_formats\" : [\n \"ISO8601\"\n ],\n \"java_timestamp_formats\" : [\n \"ISO8601\"\n ],\n \"need_client_timezone\" : true,\n \"mappings\" : {\n \"properties\" : {\n \"@timestamp\" : {\n \"type\" : \"date\"\n },\n \"author\" : {\n \"type\" : \"keyword\"\n },\n \"name\" : {\n \"type\" : \"keyword\"\n },\n \"page_count\" : {\n \"type\" : \"long\"\n },\n \"release_date\" : {\n \"type\" : \"date\",\n \"format\" : \"iso8601\"\n }\n }\n },\n \"ingest_pipeline\" : {\n \"description\" : \"Ingest pipeline created by text structure finder\",\n \"processors\" : [\n {\n \"date\" : {\n \"field\" : \"release_date\",\n \"timezone\" : \"{{ event.timezone }}\",\n \"formats\" : [\n \"ISO8601\"\n ]\n }\n }\n ]\n },\n \"field_stats\" : {\n \"author\" : {\n \"count\" : 24,\n \"cardinality\" : 20,\n \"top_hits\" : [\n {\n \"value\" : \"Frank Herbert\",\n \"count\" : 4\n },\n {\n \"value\" : \"Robert A. Heinlein\",\n \"count\" : 2\n },\n {\n \"value\" : \"Alastair Reynolds\",\n \"count\" : 1\n },\n {\n \"value\" : \"Aldous Huxley\",\n \"count\" : 1\n },\n {\n \"value\" : \"Dan Simmons\",\n \"count\" : 1\n },\n {\n \"value\" : \"Douglas Adams\",\n \"count\" : 1\n },\n {\n \"value\" : \"George Orwell\",\n \"count\" : 1\n },\n {\n \"value\" : \"Iain M. Banks\",\n \"count\" : 1\n },\n {\n \"value\" : \"Isaac Asimov\",\n \"count\" : 1\n },\n {\n \"value\" : \"James S.A. Corey\",\n \"count\" : 1\n }\n ]\n },\n \"name\" : {\n \"count\" : 24,\n \"cardinality\" : 24,\n \"top_hits\" : [\n {\n \"value\" : \"1984\",\n \"count\" : 1\n },\n {\n \"value\" : \"A Fire Upon the Deep\",\n \"count\" : 1\n },\n {\n \"value\" : \"Brave New World\",\n \"count\" : 1\n },\n {\n \"value\" : \"Children of Dune\",\n \"count\" : 1\n },\n {\n \"value\" : \"Consider Phlebas\",\n \"count\" : 1\n },\n {\n \"value\" : \"Dune\",\n \"count\" : 1\n },\n {\n \"value\" : \"Dune Messiah\",\n \"count\" : 1\n },\n {\n \"value\" : \"Ender's Game\",\n \"count\" : 1\n },\n {\n \"value\" : \"Fahrenheit 451\",\n \"count\" : 1\n },\n {\n \"value\" : \"Foundation\",\n \"count\" : 1\n }\n ]\n },\n \"page_count\" : {\n \"count\" : 24,\n \"cardinality\" : 24,\n \"min_value\" : 180,\n \"max_value\" : 768,\n \"mean_value\" : 387.0833333333333,\n \"median_value\" : 329.5,\n \"top_hits\" : [\n {\n \"value\" : 180,\n \"count\" : 1\n },\n {\n \"value\" : 208,\n \"count\" : 1\n },\n {\n \"value\" : 224,\n \"count\" : 1\n },\n {\n \"value\" : 227,\n \"count\" : 1\n },\n {\n \"value\" : 268,\n \"count\" : 1\n },\n {\n \"value\" : 271,\n \"count\" : 1\n },\n {\n \"value\" : 275,\n \"count\" : 1\n },\n {\n \"value\" : 288,\n \"count\" : 1\n },\n {\n \"value\" : 304,\n \"count\" : 1\n },\n {\n \"value\" : 311,\n \"count\" : 1\n }\n ]\n },\n \"release_date\" : {\n \"count\" : 24,\n \"cardinality\" : 20,\n \"earliest\" : \"1932-06-01\",\n \"latest\" : \"2011-06-02\",\n \"top_hits\" : [\n {\n \"value\" : \"1985-06-01\",\n \"count\" : 3\n },\n {\n \"value\" : \"1969-06-01\",\n \"count\" : 2\n },\n {\n \"value\" : \"1992-06-01\",\n \"count\" : 2\n },\n {\n \"value\" : \"1932-06-01\",\n \"count\" : 1\n },\n {\n \"value\" : \"1951-06-01\",\n \"count\" : 1\n },\n {\n \"value\" : 
\"1953-10-15\",\n \"count\" : 1\n },\n {\n \"value\" : \"1959-12-01\",\n \"count\" : 1\n },\n {\n \"value\" : \"1965-06-01\",\n \"count\" : 1\n },\n {\n \"value\" : \"1966-04-01\",\n \"count\" : 1\n },\n {\n \"value\" : \"1969-10-15\",\n \"count\" : 1\n }\n ]\n }\n }\n}" + } } } } @@ -37599,6 +39120,18 @@ "dest", "source" ] + }, + "examples": { + "PutTransformRequestExample1": { + "summary": "A pivot transform", + "description": "Run `PUT _transform/ecommerce_transform1` to create a transform that uses the pivot method.", + "value": "{\n \"source\": {\n \"index\": \"kibana_sample_data_ecommerce\",\n \"query\": {\n \"term\": {\n \"geoip.continent_name\": {\n \"value\": \"Asia\"\n }\n }\n }\n },\n \"pivot\": {\n \"group_by\": {\n \"customer_id\": {\n \"terms\": {\n \"field\": \"customer_id\",\n \"missing_bucket\": true\n }\n }\n },\n \"aggregations\": {\n \"max_price\": {\n \"max\": {\n \"field\": \"taxful_total_price\"\n }\n }\n }\n },\n \"description\": \"Maximum priced ecommerce data by customer_id in Asia\",\n \"dest\": {\n \"index\": \"kibana_sample_data_ecommerce_transform1\",\n \"pipeline\": \"add_timestamp_pipeline\"\n },\n \"frequency\": \"5m\",\n \"sync\": {\n \"time\": {\n \"field\": \"order_date\",\n \"delay\": \"60s\"\n }\n },\n \"retention_policy\": {\n \"time\": {\n \"field\": \"order_date\",\n \"max_age\": \"30d\"\n }\n }\n}" + }, + "PutTransformRequestExample2": { + "summary": "A latest transform", + "description": "Run `PUT _transform/ecommerce_transform2` to create a transform that uses the latest method.", + "value": "{\n \"source\": {\n \"index\": \"kibana_sample_data_ecommerce\"\n },\n \"latest\": {\n \"unique_key\": [\n \"customer_id\"\n ],\n \"sort\": \"order_date\"\n },\n \"description\": \"Latest order for each customer\",\n \"dest\": {\n \"index\": \"kibana_sample_data_ecommerce_transform2\"\n },\n \"frequency\": \"5m\",\n \"sync\": {\n \"time\": {\n \"field\": \"order_date\",\n \"delay\": \"60s\"\n }\n }\n}" + } } } }, @@ -37611,6 +39144,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "PutTransformResponseExample1": { + "description": "A successful response when creating a transform.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -37674,6 +39213,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "DeleteTransformResponseExample1": { + "description": "A successful response when the transform is deleted.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -37795,6 +39340,12 @@ "count", "transforms" ] + }, + "examples": { + "GetTransformStatsResponseExample1": { + "description": "A successful response that contains usage information for a transform.", + "value": "{\n \"count\": 1,\n \"transforms\": [\n {\n \"id\": \"ecommerce-customer-transform\",\n \"state\": \"started\",\n \"node\": {\n \"id\": \"cpTIGMsVQ8Gqwqlxxxxxxx\",\n \"name\": \"my.home\",\n \"ephemeral_id\": \"5-L21nFsQxxxxxxxxxx-xx\",\n \"transport_address\": \"127.0.0.1:9300\",\n \"attributes\": {}\n },\n \"stats\": {\n \"pages_processed\": 78,\n \"documents_processed\": 6027,\n \"documents_indexed\": 68,\n \"documents_deleted\": 22,\n \"delete_time_in_ms\": 214,\n \"trigger_count\": 168,\n \"index_time_in_ms\": 412,\n \"index_total\": 20,\n \"index_failures\": 0,\n \"search_time_in_ms\": 353,\n \"search_total\": 78,\n \"search_failures\": 0,\n \"processing_time_in_ms\": 8,\n \"processing_total\": 78,\n 
\"exponential_avg_checkpoint_duration_ms\": 97.30637923893185,\n \"exponential_avg_documents_indexed\": 2.2064915040974062,\n \"exponential_avg_documents_processed\": 179.89419945785045\n },\n \"checkpointing\": {\n \"last\": {\n \"checkpoint\": 20,\n \"timestamp_millis\": 1585344558220,\n \"time_upper_bound_millis\": 1585344498220\n },\n \"changes_last_detected_at\": 1585344558219\n },\n \"health\": {\n \"status\": \"green\"\n }\n }\n ]\n}" + } } } } @@ -37949,6 +39500,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "ResetTransformResponseExample1": { + "description": "A successful response when the transform is reset.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -37995,6 +39552,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "ScheduleNowTransformResponseExample1": { + "description": "A successful response when the transform is scheduled to run now.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -38051,6 +39614,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "StartTransformResponseExample1": { + "description": "A successful response when a transform starts.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -38137,6 +39706,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "StopTransformResponseExample1": { + "description": "A successful response when a transform stops.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -38227,6 +39802,12 @@ ] } } + }, + "examples": { + "UpdateTransformRequestExample1": { + "description": "Run `POST _transform/simple-kibana-ecomm-pivot/_update` to update a transform that uses the pivot method.", + "value": "{\n \"source\": {\n \"index\": \"kibana_sample_data_ecommerce\",\n \"query\": {\n \"term\": {\n \"geoip.continent_name\": {\n \"value\": \"Asia\"\n }\n }\n }\n },\n \"pivot\": {\n \"group_by\": {\n \"customer_id\": {\n \"terms\": {\n \"field\": \"customer_id\",\n \"missing_bucket\": true\n }\n }\n },\n \"aggregations\": {\n \"max_price\": {\n \"max\": {\n \"field\": \"taxful_total_price\"\n }\n }\n }\n },\n \"description\": \"Maximum priced ecommerce data by customer_id in Asia\",\n \"dest\": {\n \"index\": \"kibana_sample_data_ecommerce_transform1\",\n \"pipeline\": \"add_timestamp_pipeline\"\n },\n \"frequency\": \"5m\",\n \"sync\": {\n \"time\": {\n \"field\": \"order_date\",\n \"delay\": \"60s\"\n }\n },\n \"retention_policy\": {\n \"time\": {\n \"field\": \"order_date\",\n \"max_age\": \"30d\"\n }\n }\n}" + } } } }, @@ -38292,6 +39873,12 @@ "source", "version" ] + }, + "examples": { + "UpdateTransformResponseExample1": { + "description": "A successful response when creating a transform.", + "value": "{\n \"id\": \"simple-kibana-ecomm-pivot\",\n \"authorization\": {\n \"roles\": [\n \"superuser\"\n ]\n },\n \"version\": \"10.0.0\",\n \"create_time\": 1712951576767,\n \"source\": {\n \"index\": [\n \"kibana_sample_data_ecommerce\"\n ],\n \"query\": {\n \"term\": {\n \"geoip.continent_name\": {\n \"value\": \"Asia\"\n }\n }\n }\n },\n \"dest\": {\n \"index\": \"kibana_sample_data_ecommerce_transform_v2\",\n \"pipeline\": \"add_timestamp_pipeline\"\n },\n \"frequency\": \"15m\",\n \"sync\": {\n \"time\": {\n \"field\": \"order_date\",\n \"delay\": \"120s\"\n }\n },\n \"pivot\": {\n \"group_by\": {\n 
\"customer_id\": {\n \"terms\": {\n \"field\": \"customer_id\",\n \"missing_bucket\": true\n }\n }\n },\n \"aggregations\": {\n \"max_price\": {\n \"max\": {\n \"field\": \"taxful_total_price\"\n }\n }\n }\n },\n \"description\": \"Maximum priced ecommerce data by customer_id in Asia\",\n \"settings\": {},\n \"retention_policy\": {\n \"time\": {\n \"field\": \"order_date\",\n \"max_age\": \"30d\"\n }\n }\n}" + } } } } @@ -38356,6 +39943,12 @@ "no_action", "updated" ] + }, + "examples": { + "UpgradeTransformResponseExample1": { + "description": "A successful response contains a summary when all transforms are upgraded.", + "value": "{\n \"needs_update\": 0,\n \"updated\": 2,\n \"no_action\": 1\n}" + } } } } @@ -38559,6 +40152,63 @@ "type": "object" } } + }, + "examples": { + "UpdateRequestExample1": { + "summary": "Update a counter with a script", + "description": "Run `POST test/_update/1` to increment a counter by using a script.", + "value": "{\n \"script\" : {\n \"source\": \"ctx._source.counter += params.count\",\n \"lang\": \"painless\",\n \"params\" : {\n \"count\" : 4\n }\n }\n}" + }, + "UpdateRequestExample10": { + "summary": "Scripted upsert", + "description": "Run `POST test/_update/1` to perform a scripted upsert. When `scripted_upsert` is `true`, the script runs whether or not the document exists.\n", + "value": "{\n \"scripted_upsert\": true,\n \"script\": {\n \"source\": \"\"\"\n if ( ctx.op == 'create' ) {\n ctx._source.counter = params.count\n } else {\n ctx._source.counter += params.count\n }\n \"\"\",\n \"params\": {\n \"count\": 4\n }\n },\n \"upsert\": {}\n}" + }, + "UpdateRequestExample11": { + "summary": "Doc as upsert", + "description": "Run `POST test/_update/1` to perform a doc as upsert. Instead of sending a partial `doc` plus an `upsert` doc, you can set `doc_as_upsert` to `true` to use the contents of `doc` as the `upsert` value.\n", + "value": "{\n \"doc\": {\n \"name\": \"new_name\"\n },\n \"doc_as_upsert\": true\n}" + }, + "UpdateRequestExample2": { + "summary": "Add a tag with a script", + "description": "Run `POST test/_update/1` to use a script to add a tag to a list of tags. In this example, it is just a list, so the tag is added even it exists.\n", + "value": "{\n \"script\": {\n \"source\": \"ctx._source.tags.add(params.tag)\",\n \"lang\": \"painless\",\n \"params\": {\n \"tag\": \"blue\"\n }\n }\n}" + }, + "UpdateRequestExample3": { + "summary": "Remove a tag with a script", + "description": "Run `POST test/_update/1` to use a script to remove a tag from a list of tags. The Painless function to remove a tag takes the array index of the element you want to remove. To avoid a possible runtime error, you first need to make sure the tag exists. 
If the list contains duplicates of the tag, this script just removes one occurrence.\n", + "value": "{\n \"script\": {\n \"source\": \"if (ctx._source.tags.contains(params.tag)) { ctx._source.tags.remove(ctx._source.tags.indexOf(params.tag)) }\",\n \"lang\": \"painless\",\n \"params\": {\n \"tag\": \"blue\"\n }\n }\n}" + }, + "UpdateRequestExample4": { + "summary": "Add fields with a script", + "description": "Run `POST test/_update/1` to use a script to add a field `new_field` to the document.\n", + "value": "{\n \"script\" : \"ctx._source.new_field = 'value_of_new_field'\"\n}" + }, + "UpdateRequestExample5": { + "summary": "Remove fields with a script", + "description": "Run `POST test/_update/1` to use a script to remove a field `new_field` from the document.\n", + "value": "{\n \"script\" : \"ctx._source.remove('new_field')\"\n}" + }, + "UpdateRequestExample6": { + "summary": "Remove subfields with a script", + "description": "Run `POST test/_update/1` to use a script to remove a subfield from an object field.\n", + "value": "{\n \"script\": \"ctx._source['my-object'].remove('my-subfield')\"\n}" + }, + "UpdateRequestExample7": { + "summary": "Change the operation with a script", + "description": "Run `POST test/_update/1` to change the operation that runs from within the script. For example, this request deletes the document if the `tags` field contains `green`, otherwise it does nothing (`noop`).\n", + "value": "{\n \"script\": {\n \"source\": \"if (ctx._source.tags.contains(params.tag)) { ctx.op = 'delete' } else { ctx.op = 'noop' }\",\n \"lang\": \"painless\",\n \"params\": {\n \"tag\": \"green\"\n }\n }\n}" + }, + "UpdateRequestExample8": { + "summary": "Update part of a document", + "description": "Run `POST test/_update/1` to do a partial update that adds a new field to the existing document.\n", + "value": "{\n \"doc\": {\n \"name\": \"new_name\"\n }\n}" + }, + "UpdateRequestExample9": { + "summary": "Upsert", + "description": "Run `POST test/_update/1` to perform an upsert. If the document does not already exist, the contents of the upsert element are inserted as a new document. If the document exists, the script is run.\n", + "value": "{\n \"script\": {\n \"source\": \"ctx._source.counter += params.count\",\n \"lang\": \"painless\",\n \"params\": {\n \"count\": 4\n }\n },\n \"upsert\": {\n \"counter\": 1\n }\n}" + } } } }, @@ -38571,6 +40221,13 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_global.update:UpdateWriteResponseBase" + }, + "examples": { + "UpdateResponseExample1": { + "summary": "Detect noop updates", + "description": "By default, updates that don't change anything are detected and return `\"result\": \"noop\"`.\n", + "value": "{\n \"_shards\": {\n \"total\": 0,\n \"successful\": 0,\n \"failed\": 0\n },\n \"_index\": \"test\",\n \"_id\": \"1\",\n \"_version\": 2,\n \"_primary_term\": 1,\n \"_seq_no\": 1,\n \"result\": \"noop\"\n}" + } } } } @@ -38938,6 +40595,28 @@ "$ref": "#/components/schemas/_types:Conflicts" } } + }, + "examples": { + "UpdateByQueryRequestExample1": { + "summary": "Update selected documents", + "description": "Run `POST my-index-000001/_update_by_query?conflicts=proceed` to update documents that match a query.\n", + "value": "{\n \"query\": { \n \"term\": {\n \"user.id\": \"kimchy\"\n }\n }\n}" + }, + "UpdateByQueryRequestExample2": { + "summary": "Update the document source", + "description": "Run `POST my-index-000001/_update_by_query` with a script to update the document source. 
It increments the `count` field for all documents with a `user.id` of `kimchy` in `my-index-000001`.\n", + "value": "{\n \"script\": {\n \"source\": \"ctx._source.count++\",\n \"lang\": \"painless\"\n },\n \"query\": {\n \"term\": {\n \"user.id\": \"kimchy\"\n }\n }\n}" + }, + "UpdateByQueryRequestExample3": { + "summary": "Slice manually", + "description": "Run `POST my-index-000001/_update_by_query` to slice an update by query manually. Provide a slice ID and total number of slices to each request.\n", + "value": "{\n \"slice\": {\n \"id\": 0,\n \"max\": 2\n },\n \"script\": {\n \"source\": \"ctx._source['extra'] = 'test'\"\n }\n}" + }, + "UpdateByQueryRequestExample4": { + "summary": "Slice automatically", + "description": "Run `POST my-index-000001/_update_by_query?refresh&slices=5` to use automatic slicing. It automatically parallelizes using sliced scroll to slice on `_id`.\n", + "value": "{\n \"script\": {\n \"source\": \"ctx._source['extra'] = 'test'\"\n }\n}" + } } } } @@ -39300,6 +40979,12 @@ "found", "_id" ] + }, + "examples": { + "GetWatchResponseExample1": { + "description": "A successful response from `GET _watcher/watch/my_watch`.", + "value": "{\n \"found\": true,\n \"_id\": \"my_watch\",\n \"_seq_no\": 0,\n \"_primary_term\": 1,\n \"_version\": 1,\n \"status\": { \n \"version\": 1,\n \"state\": {\n \"active\": true,\n \"timestamp\": \"2015-05-26T18:21:08.630Z\"\n },\n \"actions\": {\n \"test_index\": {\n \"ack\": {\n \"timestamp\": \"2015-05-26T18:21:08.630Z\",\n \"state\": \"awaits_successful_execution\"\n }\n }\n }\n },\n \"watch\": {\n \"input\": {\n \"simple\": {\n \"payload\": {\n \"send\": \"yes\"\n }\n }\n },\n \"condition\": {\n \"always\": {}\n },\n \"trigger\": {\n \"schedule\": {\n \"hourly\": {\n \"minute\": [0, 5]\n }\n }\n },\n \"actions\": {\n \"test_index\": {\n \"index\": {\n \"index\": \"test\"\n }\n }\n }\n }\n}" + } } } } @@ -39416,6 +41101,12 @@ "_id", "_version" ] + }, + "examples": { + "DeleteWatchResponseExample1": { + "description": "A successful response from `DELETE _watcher/watch/my_watch`.", + "value": "{\n \"found\": true,\n \"_id\": \"my_watch\",\n \"_version\": 2\n}" + } } } } @@ -39600,6 +41291,11 @@ "type": "number" } } + }, + "examples": { + "WatcherUpdateSettingsRequestExample1": { + "value": "{\n \"index.auto_expand_replicas\": \"0-4\"\n}" + } } } }, @@ -39690,6 +41386,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "WatcherStartResponseExample1": { + "description": "A successful response from `POST _watcher/_start`.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -39775,6 +41477,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "WatcherStopResponseExample1": { + "description": "A successful response from `POST _watcher/_stop`.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -39852,6 +41560,12 @@ "license", "tagline" ] + }, + "examples": { + "XPackInfoResponseExample1": { + "description": "A successful response from `GET /_xpack`.", + "value": "{\n \"build\" : {\n \"hash\" : \"2798b1a3ce779b3611bb53a0082d4d741e4d3168\",\n \"date\" : \"2015-04-07T13:34:42Z\"\n },\n \"license\" : {\n \"uid\" : \"893361dc-9749-4997-93cb-xxx\",\n \"type\" : \"trial\",\n \"mode\" : \"trial\",\n \"status\" : \"active\",\n \"expiry_date_in_millis\" : 1542665112332\n },\n \"features\" : {\n \"ccr\" : {\n \"available\" : true,\n \"enabled\" : true\n },\n \"aggregate_metric\" : {\n 
\"available\" : true,\n \"enabled\" : true\n },\n \"analytics\" : {\n \"available\" : true,\n \"enabled\" : true\n },\n \"archive\" : {\n \"available\" : true,\n \"enabled\" : true\n },\n \"enrich\" : {\n \"available\" : true,\n \"enabled\" : true\n },\n \"graph\" : {\n \"available\" : true,\n \"enabled\" : true\n },\n \"ilm\" : {\n \"available\" : true,\n \"enabled\" : true\n },\n \"logstash\" : {\n \"available\" : true,\n \"enabled\" : true\n },\n \"ml\" : {\n \"available\" : true,\n \"enabled\" : true\n },\n \"esql\" : {\n \"available\" : true,\n \"enabled\" : true\n },\n \"monitoring\" : {\n \"available\" : true,\n \"enabled\" : true\n },\n \"rollup\": {\n \"available\": true,\n \"enabled\": true\n },\n \"searchable_snapshots\" : {\n \"available\" : true,\n \"enabled\" : true\n },\n \"security\" : {\n \"available\" : true,\n \"enabled\" : true\n },\n \"slm\" : {\n \"available\" : true,\n \"enabled\" : true\n },\n \"spatial\" : {\n \"available\" : true,\n \"enabled\" : true\n },\n \"eql\" : {\n \"available\" : true,\n \"enabled\" : true\n },\n \"sql\" : {\n \"available\" : true,\n \"enabled\" : true\n },\n \"transform\" : {\n \"available\" : true,\n \"enabled\" : true\n },\n \"voting_only\" : {\n \"available\" : true,\n \"enabled\" : true\n },\n \"watcher\" : {\n \"available\" : true,\n \"enabled\" : true\n },\n \"data_streams\" : {\n \"available\" : true,\n \"enabled\" : true\n },\n \"data_tiers\" : {\n \"available\" : true,\n \"enabled\" : true\n },\n \"enterprise_search\": {\n \"available\": true,\n \"enabled\": true\n },\n \"universal_profiling\": {\n \"available\": true,\n \"enabled\": true\n },\n \"logsdb\": {\n \"available\": true,\n \"enabled\": false\n }\n },\n \"tagline\" : \"You know, for X\"\n}" + } } } } @@ -39994,6 +41708,12 @@ "transform", "voting_only" ] + }, + "examples": { + "XPackUsageResponseExample1": { + "description": "An abbreviated response from `GET /_xpack/usage`.", + "value": "{\n \"security\" : {\n \"available\" : true,\n \"enabled\" : true\n },\n \"monitoring\" : {\n \"available\" : true,\n \"enabled\" : true,\n \"collection_enabled\" : false,\n \"enabled_exporters\" : {\n \"local\" : 1\n }\n },\n \"watcher\" : {\n \"available\" : true,\n \"enabled\" : true,\n \"execution\" : {\n \"actions\" : {\n \"_all\" : {\n \"total\" : 0,\n \"total_time_in_ms\" : 0\n }\n }\n },\n \"watch\" : {\n \"input\" : {\n \"_all\" : {\n \"total\" : 0,\n \"active\" : 0\n }\n },\n \"trigger\" : {\n \"_all\" : {\n \"total\" : 0,\n \"active\" : 0\n }\n }\n },\n \"count\" : {\n \"total\" : 0,\n \"active\" : 0\n }\n },\n \"graph\" : {\n \"available\" : true,\n \"enabled\" : true\n },\n \"ml\" : {\n \"available\" : true,\n \"enabled\" : true,\n \"jobs\" : {\n \"_all\" : {\n \"count\" : 0,\n \"detectors\" : { },\n \"created_by\" : { },\n \"model_size\" : { },\n \"forecasts\" : {\n \"total\" : 0,\n \"forecasted_jobs\" : 0\n }\n }\n },\n \"datafeeds\" : {\n \"_all\" : {\n \"count\" : 0\n }\n },\n \"data_frame_analytics_jobs\" : {\n \"_all\" : {\n \"count\" : 0\n },\n \"analysis_counts\": { },\n \"memory_usage\": {\n \"peak_usage_bytes\": {\n \"min\": 0.0,\n \"max\": 0.0,\n \"avg\": 0.0,\n \"total\": 0.0\n }\n }\n },\n \"inference\" : {\n \"ingest_processors\" : {\n \"_all\" : {\n \"num_docs_processed\" : {\n \"max\" : 0,\n \"sum\" : 0,\n \"min\" : 0\n },\n \"pipelines\" : {\n \"count\" : 0\n },\n \"num_failures\" : {\n \"max\" : 0,\n \"sum\" : 0,\n \"min\" : 0\n },\n \"time_ms\" : {\n \"max\" : 0,\n \"sum\" : 0,\n \"min\" : 0\n }\n }\n },\n \"trained_models\" : {\n \"_all\" : {\n 
\"count\": 1\n },\n \"count\": {\n \"total\": 1,\n \"prepackaged\": 1,\n \"other\": 0\n },\n \"model_size_bytes\": {\n \"min\": 0.0,\n \"max\": 0.0,\n \"avg\": 0.0,\n \"total\": 0.0\n },\n \"estimated_operations\": {\n \"min\": 0.0,\n \"max\": 0.0,\n \"avg\": 0.0,\n \"total\": 0.0\n }\n },\n \"deployments\": {\n \"count\": 0,\n \"inference_counts\": {\n \"total\": 0.0,\n \"min\": 0.0,\n \"avg\": 0.0,\n \"max\": 0.0\n },\n \"stats_by_model\": [],\n \"model_sizes_bytes\": {\n \"total\": 0.0,\n \"min\": 0.0,\n \"avg\": 0.0,\n \"max\": 0.0\n },\n \"time_ms\": {\n \"avg\": 0.0\n }\n }\n },\n \"node_count\" : 1,\n \"memory\": {\n anomaly_detectors_memory_bytes: 0,\n data_frame_analytics_memory_bytes: 0,\n pytorch_inference_memory_bytes: 0,\n total_used_memory_bytes: 0\n }\n },\n \"inference\": {\n \"available\" : true,\n \"enabled\" : true,\n \"models\" : [ ]\n },\n \"logstash\" : {\n \"available\" : true,\n \"enabled\" : true\n },\n \"eql\" : {\n \"available\" : true,\n \"enabled\" : true\n },\n \"esql\" : {\n \"available\" : true,\n \"enabled\" : true,\n \"features\" : {\n \"eval\" : 0,\n \"stats\" : 0,\n \"dissect\": 0,\n \"grok\" : 0,\n \"limit\" : 0,\n \"where\" : 0,\n \"sort\" : 0,\n \"drop\" : 0,\n \"show\" : 0,\n \"rename\" : 0,\n \"mv_expand\" : 0,\n \"keep\" : 0,\n \"enrich\" : 0,\n \"from\" : 0,\n \"row\" : 0\n },\n \"queries\" : {\n \"rest\" : {\n \"total\" : 0,\n \"failed\" : 0\n },\n \"kibana\" : {\n \"total\" : 0,\n \"failed\" : 0\n },\n \"_all\" : {\n \"total\" : 0,\n \"failed\" : 0\n }\n }\n },\n \"sql\" : {\n \"available\" : true,\n \"enabled\" : true,\n \"features\" : {\n \"having\" : 0,\n \"subselect\" : 0,\n \"limit\" : 0,\n \"orderby\" : 0,\n \"where\" : 0,\n \"join\" : 0,\n \"groupby\" : 0,\n \"command\" : 0,\n \"local\" : 0\n },\n \"queries\" : {\n \"rest\" : {\n \"total\" : 0,\n \"paging\" : 0,\n \"failed\" : 0\n },\n \"cli\" : {\n \"total\" : 0,\n \"paging\" : 0,\n \"failed\" : 0\n },\n \"canvas\" : {\n \"total\" : 0,\n \"paging\" : 0,\n \"failed\" : 0\n },\n \"odbc\" : {\n \"total\" : 0,\n \"paging\" : 0,\n \"failed\" : 0\n },\n \"jdbc\" : {\n \"total\" : 0,\n \"paging\" : 0,\n \"failed\" : 0\n },\n \"odbc32\" : {\n \"total\" : 0,\n \"paging\" : 0,\n \"failed\" : 0\n },\n \"odbc64\" : {\n \"total\" : 0,\n \"paging\" : 0,\n \"failed\" : 0\n },\n \"_all\" : {\n \"total\" : 0,\n \"paging\" : 0,\n \"failed\" : 0\n },\n \"translate\" : {\n \"count\" : 0\n }\n }\n },\n \"rollup\" : {\n \"available\" : true,\n \"enabled\" : true\n },\n \"ilm\" : {\n \"policy_count\" : 3,\n \"policy_stats\" : [ ]\n },\n \"slm\" : {\n \"available\" : true,\n \"enabled\" : true\n },\n \"ccr\" : {\n \"available\" : true,\n \"enabled\" : true,\n \"follower_indices_count\" : 0,\n \"auto_follow_patterns_count\" : 0\n },\n \"transform\" : {\n \"available\" : true,\n \"enabled\" : true\n },\n \"voting_only\" : {\n \"available\" : true,\n \"enabled\" : true\n },\n \"searchable_snapshots\" : {\n \"available\" : true,\n \"enabled\" : true,\n \"indices_count\" : 0,\n \"full_copy_indices_count\" : 0,\n \"shared_cache_indices_count\" : 0\n },\n \"spatial\" : {\n \"available\" : true,\n \"enabled\" : true\n },\n \"analytics\" : {\n \"available\" : true,\n \"enabled\" : true,\n \"stats\": {\n \"boxplot_usage\" : 0,\n \"top_metrics_usage\" : 0,\n \"normalize_usage\" : 0,\n \"cumulative_cardinality_usage\" : 0,\n \"t_test_usage\" : 0,\n \"rate_usage\" : 0,\n \"string_stats_usage\" : 0,\n \"moving_percentiles_usage\" : 0,\n \"multi_terms_usage\" : 0\n }\n },\n \"data_streams\" : {\n \"available\" : true,\n 
\"enabled\" : true,\n \"data_streams\" : 0,\n \"indices_count\" : 0\n },\n \"data_lifecycle\" : {\n \"available\": true,\n \"enabled\": true,\n \"count\": 0,\n \"default_rollover_used\": true,\n \"data_retention\": {\n \"configured_data_streams\": 0\n },\n \"effective_retention\": {\n \"retained_data_streams\": 0\n },\n \"global_retention\": {\n \"default\": {\n \"defined\": false\n },\n \"max\": {\n \"defined\": false\n }\n }\n },\n \"data_tiers\" : {\n \"available\" : true,\n \"enabled\" : true,\n \"data_warm\" : {\n \"node_count\" : 0,\n \"index_count\" : 0,\n \"total_shard_count\" : 0,\n \"primary_shard_count\" : 0,\n \"doc_count\" : 0,\n \"total_size_bytes\" : 0,\n \"primary_size_bytes\" : 0,\n \"primary_shard_size_avg_bytes\" : 0,\n \"primary_shard_size_median_bytes\" : 0,\n \"primary_shard_size_mad_bytes\" : 0\n },\n \"data_frozen\" : {\n \"node_count\" : 1,\n \"index_count\" : 0,\n \"total_shard_count\" : 0,\n \"primary_shard_count\" : 0,\n \"doc_count\" : 0,\n \"total_size_bytes\" : 0,\n \"primary_size_bytes\" : 0,\n \"primary_shard_size_avg_bytes\" : 0,\n \"primary_shard_size_median_bytes\" : 0,\n \"primary_shard_size_mad_bytes\" : 0\n },\n \"data_cold\" : {\n \"node_count\" : 0,\n \"index_count\" : 0,\n \"total_shard_count\" : 0,\n \"primary_shard_count\" : 0,\n \"doc_count\" : 0,\n \"total_size_bytes\" : 0,\n \"primary_size_bytes\" : 0,\n \"primary_shard_size_avg_bytes\" : 0,\n \"primary_shard_size_median_bytes\" : 0,\n \"primary_shard_size_mad_bytes\" : 0\n },\n \"data_content\" : {\n \"node_count\" : 0,\n \"index_count\" : 0,\n \"total_shard_count\" : 0,\n \"primary_shard_count\" : 0,\n \"doc_count\" : 0,\n \"total_size_bytes\" : 0,\n \"primary_size_bytes\" : 0,\n \"primary_shard_size_avg_bytes\" : 0,\n \"primary_shard_size_median_bytes\" : 0,\n \"primary_shard_size_mad_bytes\" : 0\n },\n \"data_hot\" : {\n \"node_count\" : 0,\n \"index_count\" : 0,\n \"total_shard_count\" : 0,\n \"primary_shard_count\" : 0,\n \"doc_count\" : 0,\n \"total_size_bytes\" : 0,\n \"primary_size_bytes\" : 0,\n \"primary_shard_size_avg_bytes\" : 0,\n \"primary_shard_size_median_bytes\" : 0,\n \"primary_shard_size_mad_bytes\" : 0\n }\n },\n \"aggregate_metric\" : {\n \"available\" : true,\n \"enabled\" : true\n },\n \"archive\" : {\n \"available\" : true,\n \"enabled\" : true,\n \"indices_count\" : 0\n },\n \"health_api\" : {\n \"available\" : true,\n \"enabled\" : true,\n \"invocations\": {\n \"total\": 0\n }\n },\n \"remote_clusters\": {\n \"size\": 0,\n \"mode\": {\n \"proxy\": 0,\n \"sniff\": 0\n },\n \"security\": {\n \"cert\": 0,\n \"api_key\": 0\n }\n },\n \"enterprise_search\" : {\n \"available\": true,\n \"enabled\": true,\n \"search_applications\" : {\n \"count\": 0\n },\n \"analytics_collections\": {\n \"count\": 0\n },\n \"query_rulesets\": {\n \"total_rule_count\": 0,\n \"total_count\": 0,\n \"min_rule_count\": 0,\n \"max_rule_count\": 0\n }\n },\n \"universal_profiling\" : {\n \"available\" : true,\n \"enabled\" : true\n },\n \"logsdb\": {\n \"available\": true,\n \"enabled\": false,\n \"indices_count\": 0,\n \"indices_with_synthetic_source\": 0,\n \"num_docs\": 0,\n \"size_in_bytes\": 0,\n \"has_custom_cutoff_date\": false\n }\n}" + } } } } @@ -94393,6 +96113,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/async_search._types:AsyncSearchDocumentResponseBase" + }, + "examples": { + "AsyncSearchSubmitResponseExample1": { + "description": "A successful response when performing search asynchronously.", + "value": "{\n \"id\" : 
\"FmRldE8zREVEUzA2ZVpUeGs2ejJFUFEaMkZ5QTVrSTZSaVN3WlNFVmtlWHJsdzoxMDc=\",\n \"is_partial\" : true,\n \"is_running\" : true,\n \"start_time_in_millis\" : 1583945890986,\n \"expiration_time_in_millis\" : 1584377890986,\n \"response\" : {\n \"took\" : 1122,\n \"timed_out\" : false,\n \"num_reduce_phases\" : 0,\n \"_shards\" : {\n \"total\" : 562,\n \"successful\" : 3,\n \"skipped\" : 0,\n \"failed\" : 0\n },\n \"hits\" : {\n \"total\" : {\n \"value\" : 157483,\n \"relation\" : \"gte\"\n },\n \"max_score\" : null,\n \"hits\" : [ ]\n }\n }\n}" + } } } } @@ -94433,6 +96159,22 @@ "items", "took" ] + }, + "examples": { + "BulkResponseExample1": { + "summary": "Multiple successful operations", + "value": "{\n \"took\": 30,\n \"errors\": false,\n \"items\": [\n {\n \"index\": {\n \"_index\": \"test\",\n \"_id\": \"1\",\n \"_version\": 1,\n \"result\": \"created\",\n \"_shards\": {\n \"total\": 2,\n \"successful\": 1,\n \"failed\": 0\n },\n \"status\": 201,\n \"_seq_no\" : 0,\n \"_primary_term\": 1\n }\n },\n {\n \"delete\": {\n \"_index\": \"test\",\n \"_id\": \"2\",\n \"_version\": 1,\n \"result\": \"not_found\",\n \"_shards\": {\n \"total\": 2,\n \"successful\": 1,\n \"failed\": 0\n },\n \"status\": 404,\n \"_seq_no\" : 1,\n \"_primary_term\" : 2\n }\n },\n {\n \"create\": {\n \"_index\": \"test\",\n \"_id\": \"3\",\n \"_version\": 1,\n \"result\": \"created\",\n \"_shards\": {\n \"total\": 2,\n \"successful\": 1,\n \"failed\": 0\n },\n \"status\": 201,\n \"_seq_no\" : 2,\n \"_primary_term\" : 3\n }\n },\n {\n \"update\": {\n \"_index\": \"test\",\n \"_id\": \"1\",\n \"_version\": 2,\n \"result\": \"updated\",\n \"_shards\": {\n \"total\": 2,\n \"successful\": 1,\n \"failed\": 0\n },\n \"status\": 200,\n \"_seq_no\" : 3,\n \"_primary_term\" : 4\n }\n }\n ]\n}" + }, + "BulkResponseExample2": { + "summary": "Failed actions", + "description": "If you run `POST /_bulk` with operations that update non-existent documents, the operations cannot complete successfully. The API returns a response with an `errors` property value `true`. The response also includes an error object for any failed operations. 
The error object contains additional information about the failure, such as the error type and reason.\n", + "value": "{\n \"took\": 486,\n \"errors\": true,\n \"items\": [\n {\n \"update\": {\n \"_index\": \"index1\",\n \"_id\": \"5\",\n \"status\": 404,\n \"error\": {\n \"type\": \"document_missing_exception\",\n \"reason\": \"[5]: document missing\",\n \"index_uuid\": \"aAsFqTI0Tc2W0LCWgPNrOA\",\n \"shard\": \"0\",\n \"index\": \"index1\"\n }\n }\n },\n {\n \"update\": {\n \"_index\": \"index1\",\n \"_id\": \"6\",\n \"status\": 404,\n \"error\": {\n \"type\": \"document_missing_exception\",\n \"reason\": \"[6]: document missing\",\n \"index_uuid\": \"aAsFqTI0Tc2W0LCWgPNrOA\",\n \"shard\": \"0\",\n \"index\": \"index1\"\n }\n }\n },\n {\n \"create\": {\n \"_index\": \"index1\",\n \"_id\": \"7\",\n \"_version\": 1,\n \"result\": \"created\",\n \"_shards\": {\n \"total\": 2,\n \"successful\": 1,\n \"failed\": 0\n },\n \"_seq_no\": 0,\n \"_primary_term\": 1,\n \"status\": 201\n }\n }\n ]\n}" + }, + "BulkResponseExample3": { + "summary": "Filter for failed operations", + "description": "An example response from `POST /_bulk?filter_path=items.*.error`, which returns only information about failed operations.\n", + "value": "{\n \"items\": [\n {\n \"update\": {\n \"error\": {\n \"type\": \"document_missing_exception\",\n \"reason\": \"[5]: document missing\",\n \"index_uuid\": \"aAsFqTI0Tc2W0LCWgPNrOA\",\n \"shard\": \"0\",\n \"index\": \"index1\"\n }\n }\n },\n {\n \"update\": {\n \"error\": {\n \"type\": \"document_missing_exception\",\n \"reason\": \"[6]: document missing\",\n \"index_uuid\": \"aAsFqTI0Tc2W0LCWgPNrOA\",\n \"shard\": \"0\",\n \"index\": \"index1\"\n }\n }\n }\n ]\n}" + } } } } @@ -94675,6 +96417,12 @@ "required": [ "patterns" ] + }, + "examples": { + "GetAutoFollowPatternResponseExample1": { + "description": "A successful response from `GET /_ccr/auto_follow/my_auto_follow_pattern`, which gets auto-follow patterns.", + "value": "{\n \"patterns\": [\n {\n \"name\": \"my_auto_follow_pattern\",\n \"pattern\": {\n \"active\": true,\n \"remote_cluster\" : \"remote_cluster\",\n \"leader_index_patterns\" :\n [\n \"leader_index*\"\n ],\n \"leader_index_exclusion_patterns\":\n [\n \"leader_index_001\"\n ],\n \"follow_index_pattern\" : \"{{leader_index}}-follower\"\n }\n }\n ]\n}" + } } } } @@ -94832,6 +96580,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/cluster.health:HealthResponseBody" + }, + "examples": { + "ClusterHealthResponseExample1": { + "description": "A successful response from `GET _cluster/health`. 
It is the health status of a quiet single node cluster with a single index with one shard and one replica.\n", + "value": "{\n \"cluster_name\" : \"testcluster\",\n \"status\" : \"yellow\",\n \"timed_out\" : false,\n \"number_of_nodes\" : 1,\n \"number_of_data_nodes\" : 1,\n \"active_primary_shards\" : 1,\n \"active_shards\" : 1,\n \"relocating_shards\" : 0,\n \"initializing_shards\" : 0,\n \"unassigned_shards\" : 1,\n \"delayed_unassigned_shards\": 0,\n \"number_of_pending_tasks\" : 0,\n \"number_of_in_flight_fetch\": 0,\n \"task_max_waiting_in_queue_millis\": 0,\n \"active_shards_percent_as_number\": 50.0\n}" + } } } } @@ -94884,6 +96638,11 @@ "result", "id" ] + }, + "examples": { + "ConnectorPutResponseExample1": { + "value": "{\n \"result\": \"created\",\n \"id\": \"my-connector\"\n}" + } } } } @@ -94947,6 +96706,13 @@ "application/json": { "schema": { "$ref": "#/components/schemas/eql._types:EqlSearchResponseBase" + }, + "examples": { + "EqlSearchResponseExample2": { + "summary": "A successful response for performing search with an EQL query.", + "description": "", + "value": "{\n \"is_partial\": false,\n \"is_running\": false,\n \"took\": 6,\n \"timed_out\": false,\n \"hits\": {\n \"total\": {\n \"value\": 1,\n \"relation\": \"eq\"\n },\n \"sequences\": [\n {\n \"join_keys\": [\n 2012\n ],\n \"events\": [\n {\n \"_index\": \".ds-my-data-stream-2099.12.07-000001\",\n \"_id\": \"AtOJ4UjUBAAx3XR5kcCM\",\n \"_source\": {\n \"@timestamp\": \"2099-12-06T11:04:07.000Z\",\n \"event\": {\n \"category\": \"file\",\n \"id\": \"dGCHwoeS\",\n \"sequence\": 2\n },\n \"file\": {\n \"accessed\": \"2099-12-07T11:07:08.000Z\",\n \"name\": \"cmd.exe\",\n \"path\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\",\n \"type\": \"file\",\n \"size\": 16384\n },\n \"process\": {\n \"pid\": 2012,\n \"name\": \"cmd.exe\",\n \"executable\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\"\n }\n }\n },\n {\n \"_index\": \".ds-my-data-stream-2099.12.07-000001\",\n \"_id\": \"OQmfCaduce8zoHT93o4H\",\n \"_source\": {\n \"@timestamp\": \"2099-12-07T11:07:09.000Z\",\n \"event\": {\n \"category\": \"process\",\n \"id\": \"aR3NWVOs\",\n \"sequence\": 4\n },\n \"process\": {\n \"pid\": 2012,\n \"name\": \"regsvr32.exe\",\n \"command_line\": \"regsvr32.exe /s /u /i:https://...RegSvr32.sct scrobj.dll\",\n \"executable\": \"C:\\\\Windows\\\\System32\\\\regsvr32.exe\"\n }\n }\n }\n ]\n }\n ]\n }\n}" + } } } } @@ -94979,6 +96745,12 @@ "_id", "matched" ] + }, + "examples": { + "ExplainResponseExample1": { + "description": "A successful response from `GET /my-index-000001/_explain/0`.", + "value": "{\n \"_index\":\"my-index-000001\",\n \"_id\":\"0\",\n \"matched\":true,\n \"explanation\":{\n \"value\":1.6943598,\n \"description\":\"weight(message:elasticsearch in 0) [PerFieldSimilarity], result of:\",\n \"details\":[\n {\n \"value\":1.6943598,\n \"description\":\"score(freq=1.0), computed as boost * idf * tf from:\",\n \"details\":[\n {\n \"value\":2.2,\n \"description\":\"boost\",\n \"details\":[]\n },\n {\n \"value\":1.3862944,\n \"description\":\"idf, computed as log(1 + (N - n + 0.5) / (n + 0.5)) from:\",\n \"details\":[\n {\n \"value\":1,\n \"description\":\"n, number of documents containing term\",\n \"details\":[]\n },\n {\n \"value\":5,\n \"description\":\"N, total number of documents with field\",\n \"details\":[]\n }\n ]\n },\n {\n \"value\":0.5555556,\n \"description\":\"tf, computed as freq / (freq + k1 * (1 - b + b * dl / avgdl)) from:\",\n \"details\":[\n {\n \"value\":1.0,\n \"description\":\"freq, occurrences of term within 
document\",\n \"details\":[]\n },\n {\n \"value\":1.2,\n \"description\":\"k1, term saturation parameter\",\n \"details\":[]\n },\n {\n \"value\":0.75,\n \"description\":\"b, length normalization parameter\",\n \"details\":[]\n },\n {\n \"value\":3.0,\n \"description\":\"dl, length of field\",\n \"details\":[]\n },\n {\n \"value\":5.4,\n \"description\":\"avgdl, average length of field\",\n \"details\":[]\n }\n ]\n }\n ]\n }\n ]\n }\n}" + } } } } @@ -95007,6 +96779,18 @@ "indices", "fields" ] + }, + "examples": { + "FieldCapabilitiesResponseExample1": { + "summary": "Get two fields", + "description": "A successful response from `GET _field_caps?fields=rating,title`. The field `rating` is defined as a long in `index1` and `index2` and as a `keyword` in `index3` and `index4`. The field `rating` is not aggregatable in `index1`. The field `rating` is not searchable in `index4`. The field `title` is defined as text in all indices.\n", + "value": "{\n \"indices\": [ \"index1\", \"index2\", \"index3\", \"index4\", \"index5\" ],\n \"fields\": {\n \"rating\": { \n \"long\": {\n \"metadata_field\": false,\n \"searchable\": true,\n \"aggregatable\": false,\n \"indices\": [ \"index1\", \"index2\" ],\n \"non_aggregatable_indices\": [ \"index1\" ] \n },\n \"keyword\": {\n \"metadata_field\": false,\n \"searchable\": false,\n \"aggregatable\": true,\n \"indices\": [ \"index3\", \"index4\" ],\n \"non_searchable_indices\": [ \"index4\" ] \n }\n },\n \"title\": { \n \"text\": {\n \"metadata_field\": false,\n \"searchable\": true,\n \"aggregatable\": false\n }\n }\n }\n}" + }, + "FieldCapabilitiesResponseExample2": { + "summary": "Get unmapped fields", + "description": "A successful response from `GET _field_caps?fields=rating,title&include_unmapped`. The response contains an entry for each field that is present in some indices but not all. 
For example, the `rating` and `title` fields are unmapped in `index5`.\n", + "value": "{\n \"indices\": [ \"index1\", \"index2\", \"index3\", \"index4\", \"index5\" ],\n \"fields\": {\n \"rating\": { \n \"long\": {\n \"metadata_field\": false,\n \"searchable\": true,\n \"aggregatable\": false,\n \"indices\": [ \"index1\", \"index2\" ],\n \"non_aggregatable_indices\": [ \"index1\" ] \n },\n \"keyword\": {\n \"metadata_field\": false,\n \"searchable\": false,\n \"aggregatable\": true,\n \"indices\": [ \"index3\", \"index4\" ],\n \"non_searchable_indices\": [ \"index4\" ] \n }\n },\n \"title\": { \n \"text\": {\n \"metadata_field\": false,\n \"searchable\": true,\n \"aggregatable\": false\n }\n }\n }\n}" + } } } } @@ -95181,6 +96965,12 @@ "additionalProperties": { "$ref": "#/components/schemas/ilm.get_lifecycle:Lifecycle" } + }, + "examples": { + "GetLifecycleResponseExample1": { + "description": "A successful response when retrieving a lifecycle policy.", + "value": "{\n \"my_policy\": {\n \"version\": 1,\n \"modified_date\": 82392349,\n \"policy\": {\n \"phases\": {\n \"warm\": {\n \"min_age\": \"10d\",\n \"actions\": {\n \"forcemerge\": {\n \"max_num_segments\": 1\n }\n }\n },\n \"delete\": {\n \"min_age\": \"30d\",\n \"actions\": {\n \"delete\": {\n \"delete_searchable_snapshot\": true\n }\n }\n }\n }\n },\n \"in_use_by\" : {\n \"indices\" : [],\n \"data_streams\" : [],\n \"composable_templates\" : []\n }\n }\n}" + } } } } @@ -95191,6 +96981,18 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:WriteResponseBase" + }, + "examples": { + "IndexResponseExample1": { + "summary": "Automate document IDs", + "description": "A successful response from `POST my-index-000001/_doc/`, which contains an automatically generated document ID.", + "value": "{\n \"_shards\": {\n \"total\": 2,\n \"failed\": 0,\n \"successful\": 2\n },\n \"_index\": \"my-index-000001\",\n \"_id\": \"W0tpsmIBdwcYyG50zbta\",\n \"_version\": 1,\n \"_seq_no\": 0,\n \"_primary_term\": 1,\n \"result\": \"created\"\n}" + }, + "IndexResponseExample2": { + "summary": "Define document IDs", + "description": "A successful response from `PUT my-index-000001/_doc/1`.", + "value": "{\n \"_shards\": {\n \"total\": 2,\n \"failed\": 0,\n \"successful\": 2\n },\n \"_index\": \"my-index-000001\",\n \"_id\": \"1\",\n \"_version\": 1,\n \"_seq_no\": 0,\n \"_primary_term\": 1,\n \"result\": \"created\"\n}" + } } } } @@ -95401,6 +97203,23 @@ "additionalProperties": { "$ref": "#/components/schemas/indices.get_field_mapping:TypeFieldMappings" } + }, + "examples": { + "indicesGetFieldMappingResponseExample1": { + "summary": "A single field mapping", + "description": "A successful response from `GET publications/_mapping/field/title`, which returns the mapping of a field called `title`.\n", + "value": "{\n \"publications\": {\n \"mappings\": {\n \"title\": {\n \"full_name\": \"title\",\n \"mapping\": {\n \"title\": {\n \"type\": \"text\"\n }\n }\n }\n }\n }\n}" + }, + "indicesGetFieldMappingResponseExample2": { + "summary": "Multiple field mappings", + "description": "A successful response from `GET publications/_mapping/field/author.id,abstract,name`. 
The get field mapping API also supports wildcard notation.\n", + "value": "{\n \"publications\": {\n \"mappings\": {\n \"author.id\": {\n \"full_name\": \"author.id\",\n \"mapping\": {\n \"id\": {\n \"type\": \"text\"\n }\n }\n },\n \"abstract\": {\n \"full_name\": \"abstract\",\n \"mapping\": {\n \"abstract\": {\n \"type\": \"text\"\n }\n }\n }\n }\n }\n}" + }, + "indicesGetFieldMappingResponseExample3": { + "summary": "Wildcards", + "description": "A successful response from `GET publications/_mapping/field/a*`.\n", + "value": "{\n \"publications\": {\n \"mappings\": {\n \"author.name\": {\n \"full_name\": \"author.name\",\n \"mapping\": {\n \"name\": {\n \"type\": \"text\"\n }\n }\n },\n \"abstract\": {\n \"full_name\": \"abstract\",\n \"mapping\": {\n \"abstract\": {\n \"type\": \"text\"\n }\n }\n },\n \"author.id\": {\n \"full_name\": \"author.id\",\n \"mapping\": {\n \"id\": {\n \"type\": \"text\"\n }\n }\n }\n }\n }\n}" + } } } } @@ -95823,6 +97642,28 @@ "application/json": { "schema": { "$ref": "#/components/schemas/inference._types:InferenceResult" + }, + "examples": { + "InferenceResponseExample1": { + "summary": "Completion task", + "description": "A successful response from `POST _inference/completion/openai_chat_completions`.\n", + "value": "{\n \"completion\": [\n {\n \"result\": \"Elastic is a company that provides a range of software solutions for search, logging, security, and analytics. Their flagship product is Elasticsearch, an open-source, distributed search engine that allows users to search, analyze, and visualize large volumes of data in real-time. Elastic also offers products such as Kibana, a data visualization tool, and Logstash, a log management and pipeline tool, as well as various other tools and solutions for data analysis and management.\"\n }\n ]\n}" + }, + "InferenceResponseExample2": { + "summary": "Rerank task", + "description": "A successful response from `POST _inference/rerank/cohere_rerank`.\n", + "value": "{\n \"rerank\": [\n {\n \"index\": \"2\",\n \"relevance_score\": \"0.011597361\",\n \"text\": \"leia\"\n },\n {\n \"index\": \"0\",\n \"relevance_score\": \"0.006338922\",\n \"text\": \"luke\"\n },\n {\n \"index\": \"5\",\n \"relevance_score\": \"0.0016166499\",\n \"text\": \"star\"\n },\n {\n \"index\": \"4\",\n \"relevance_score\": \"0.0011695103\",\n \"text\": \"r2d2\"\n },\n {\n \"index\": \"1\",\n \"relevance_score\": \"5.614787E-4\",\n \"text\": \"like\"\n },\n {\n \"index\": \"6\",\n \"relevance_score\": \"3.7850367E-4\",\n \"text\": \"wars\"\n },\n {\n \"index\": \"3\",\n \"relevance_score\": \"1.2508839E-5\",\n \"text\": \"chewy\"\n }\n ]\n}" + },
 + "InferenceResponseExample3": { + "summary": "Sparse embedding task", + "description": "An abbreviated response from `POST _inference/sparse_embedding/my-elser-model`.\n", + "value": "{\n \"sparse_embedding\": [\n {\n \"port\": 2.1259406,\n \"sky\": 1.7073475,\n \"color\": 1.6922266,\n \"dead\": 1.6247464,\n \"television\": 1.3525393,\n \"above\": 1.2425821,\n \"tuned\": 1.1440028,\n \"colors\": 1.1218185,\n \"tv\": 1.0111054,\n \"ports\": 1.0067928,\n \"poem\": 1.0042328,\n \"channel\": 0.99471164,\n \"tune\": 0.96235967,\n \"scene\": 0.9020516\n }\n ]\n}" + }, + "InferenceResponseExample4": { + "summary": "Text embedding task", + "description": "An abbreviated response from `POST _inference/text_embedding/my-cohere-endpoint`.\n", + "value": "{\n \"text_embedding\": [\n {\n \"embedding\": [\n 0.018569946,\n -0.036895752,\n 0.01486969,\n -0.0045204163,\n -0.04385376,\n 0.0075950623,\n 
0.04260254,\n -0.004005432,\n 0.007865906,\n 0.030792236,\n -0.050476074,\n 0.011795044,\n -0.011642456,\n -0.010070801\n ]\n }\n ]\n}" + } } } } @@ -95939,6 +97780,12 @@ "required": [ "docs" ] + }, + "examples": { + "SimulatePipelineResponseExample1": { + "description": "A successful response for running an ingest pipeline against a set of provided documents.", + "value": "{\n \"docs\": [\n {\n \"doc\": {\n \"_id\": \"id\",\n \"_index\": \"index\",\n \"_version\": \"-3\",\n \"_source\": {\n \"field2\": \"_value\",\n \"foo\": \"bar\"\n },\n \"_ingest\": {\n \"timestamp\": \"2017-05-04T22:30:03.187Z\"\n }\n }\n },\n {\n \"doc\": {\n \"_id\": \"id\",\n \"_index\": \"index\",\n \"_version\": \"-3\",\n \"_source\": {\n \"field2\": \"_value\",\n \"foo\": \"rab\"\n },\n \"_ingest\": {\n \"timestamp\": \"2017-05-04T22:30:03.188Z\"\n }\n }\n }\n ]\n}" + } } } } @@ -96141,6 +97988,12 @@ "required": [ "deleted" ] + }, + "examples": { + "MlDeleteExpiredDataResponseExample1": { + "description": "A successful response when deleting expired and unused anomaly detection data.", + "value": "{\n \"deleted\": true\n}" + } } } } @@ -96151,6 +98004,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "MlDeleteForecastResponseExample1": { + "description": "A successful response when deleting a forecast from an anomaly detection job.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -96177,6 +98036,12 @@ "required": [ "field_selection", "memory_estimation" ] + }, + "examples": { + "MlExplainDataFrameAnalyticsResponseExample1": { + "description": "A successful response for explaining a data frame analytics job configuration.", + "value": "{\n \"field_selection\": [\n {\n \"field\": \"number_of_bedrooms\",\n \"mappings_types\": [\n \"integer\"\n ],\n \"is_included\": true,\n \"is_required\": false,\n \"feature_type\": \"numerical\"\n },\n {\n \"field\": \"postcode\",\n \"mappings_types\": [\n \"text\"\n ],\n \"is_included\": false,\n \"is_required\": false,\n \"reason\": \"[postcode.keyword] is preferred because it is aggregatable\"\n },\n {\n \"field\": \"postcode.keyword\",\n \"mappings_types\": [\n \"keyword\"\n ],\n \"is_included\": true,\n \"is_required\": false,\n \"feature_type\": \"categorical\"\n },\n {\n \"field\": \"price\",\n \"mappings_types\": [\n \"float\"\n ],\n \"is_included\": true,\n \"is_required\": true,\n \"feature_type\": \"numerical\"\n }\n ],\n \"memory_estimation\": {\n \"expected_memory_without_disk\": \"128MB\",\n \"expected_memory_with_disk\": \"32MB\"\n }\n}" + } } } } @@ -96774,6 +98639,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/nodes.reload_secure_settings:ResponseBase" + }, + "examples": { + "ReloadSecureSettingsResponseExample1": { + "description": "A successful response when reloading the keystore on nodes in your cluster.", + "value": "{\n \"_nodes\": {\n \"total\": 1,\n \"successful\": 1,\n \"failed\": 0\n },\n \"cluster_name\": \"my_cluster\",\n \"nodes\": {\n \"pQHNt5rXTTWNvUgOrdynKg\": {\n \"name\": \"node-0\"\n }\n }\n}" + } } } } @@ -96880,6 +98751,12 @@ "required": [ "jobs" ] + }, + "examples": { + "GetRollupJobResponseExample1": { + "description": "A successful response from `GET _rollup/job/sensor`.", + "value": "{\n \"jobs\": [\n {\n \"config\": {\n \"id\": \"sensor\",\n \"index_pattern\": \"sensor-*\",\n \"rollup_index\": \"sensor_rollup\",\n \"cron\": \"*/30 * * * * ?\",\n \"groups\": {\n \"date_histogram\": {\n \"fixed_interval\": \"1h\",\n \"delay\": \"7d\",\n \"field\": 
\"timestamp\",\n \"time_zone\": \"UTC\"\n },\n \"terms\": {\n \"fields\": [\n \"node\"\n ]\n }\n },\n \"metrics\": [\n {\n \"field\": \"temperature\",\n \"metrics\": [\n \"min\",\n \"max\",\n \"sum\"\n ]\n },\n {\n \"field\": \"voltage\",\n \"metrics\": [\n \"avg\"\n ]\n }\n ],\n \"timeout\": \"20s\",\n \"page_size\": 1000\n },\n \"status\": {\n \"job_state\": \"stopped\"\n },\n \"stats\": {\n \"pages_processed\": 0,\n \"documents_processed\": 0,\n \"rollups_indexed\": 0,\n \"trigger_count\": 0,\n \"index_failures\": 0,\n \"index_time_in_ms\": 0,\n \"index_total\": 0,\n \"search_failures\": 0,\n \"search_time_in_ms\": 0,\n \"search_total\": 0,\n \"processing_time_in_ms\": 0,\n \"processing_total\": 0\n }\n }\n ]\n}" + } } } } @@ -96893,6 +98770,12 @@ "additionalProperties": { "$ref": "#/components/schemas/rollup.get_rollup_caps:RollupCapabilities" } + }, + "examples": { + "GetRollupCapabilitiesResponseExample1": { + "description": "A successful response from `GET _rollup/data/sensor-*` for a rollup job that targets the index pattern `sensor-*`. The response contains the rollup job ID, the index that holds the rolled data, and the index pattern that the job was targeting. It also shows a list of fields that contain data eligible for rollup searches. For example, you can use a `min`, `max`, or `sum` aggregation on the `temperature` field, but only a `date_histogram` on `timestamp`.\n", + "value": "{\n \"sensor-*\" : {\n \"rollup_jobs\" : [\n {\n \"job_id\" : \"sensor\",\n \"rollup_index\" : \"sensor_rollup\",\n \"index_pattern\" : \"sensor-*\",\n \"fields\" : {\n \"node\" : [\n {\n \"agg\" : \"terms\"\n }\n ],\n \"temperature\" : [\n {\n \"agg\" : \"min\"\n },\n {\n \"agg\" : \"max\"\n },\n {\n \"agg\" : \"sum\"\n }\n ],\n \"timestamp\" : [\n {\n \"agg\" : \"date_histogram\",\n \"time_zone\" : \"UTC\",\n \"fixed_interval\" : \"1h\",\n \"delay\": \"7d\"\n }\n ],\n \"voltage\" : [\n {\n \"agg\" : \"avg\"\n }\n ]\n }\n }\n ]\n }\n}" + } } } } @@ -96932,6 +98815,12 @@ "_shards", "hits" ] + }, + "examples": { + "RollupSearchResponseExample1": { + "description": "An abbreviated response from `GET /sensor_rollup/_rollup_search` with a `max` aggregation on a `temperature` field. 
The response provides some metadata about the request (`took`, `_shards`), the search hits (which is always empty for rollup searches), and the aggregation response.\n", + "value": "{\n \"took\" : 102,\n \"timed_out\" : false,\n \"terminated_early\" : false,\n \"_shards\" : {} ,\n \"hits\" : {\n \"total\" : {\n \"value\": 0,\n \"relation\": \"eq\"\n },\n \"max_score\" : 0.0,\n \"hits\" : [ ]\n },\n \"aggregations\" : {\n \"max_temperature\" : {\n \"value\" : 202.0\n }\n }\n}" + } } } } @@ -96950,6 +98839,23 @@ "required": [ "result" ] + }, + "examples": { + "ExecutePainlessScriptResponseExample1": { + "summary": "Test context", + "description": "A successful response from `POST /_scripts/painless/_execute` with a `painless_test` context.", + "value": "{\n \"result\": \"0.1\"\n}" + }, + "ExecutePainlessScriptResponseExample2": { + "summary": "Filter context", + "description": "A successful response from `POST /_scripts/painless/_execute` with a `filter` context.", + "value": "{\n \"result\": true\n}" + }, + "ExecutePainlessScriptResponseExample3": { + "summary": "Score context", + "description": "A successful response from `POST /_scripts/painless/_execute` with a `score` context.", + "value": "{\n \"result\": 0.8\n}" + } } } } @@ -97003,6 +98909,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:MapboxVectorTiles" + }, + "examples": { + "SearchMvtResponseExample1": { + "description": "A successful response from `GET museums/_mvt/location/13/4207/2692`. It returns results as a binary vector tile. When decoded into JSON, the tile contains the following data.\n", + "value": "{\n \"hits\": {\n \"extent\": 4096,\n \"version\": 2,\n \"features\": [\n {\n \"geometry\": {\n \"type\": \"Point\",\n \"coordinates\": [\n 3208,\n 3864\n ]\n },\n \"properties\": {\n \"_id\": \"1\",\n \"_index\": \"museums\",\n \"name\": \"NEMO Science Museum\",\n \"price\": 1750\n },\n \"type\": 1\n },\n {\n \"geometry\": {\n \"type\": \"Point\",\n \"coordinates\": [\n 3429,\n 3496\n ]\n },\n \"properties\": {\n \"_id\": \"3\",\n \"_index\": \"museums\",\n \"name\": \"Nederlands Scheepvaartmuseum\",\n \"price\": 1650\n },\n \"type\": 1\n },\n {\n \"geometry\": {\n \"type\": \"Point\",\n \"coordinates\": [\n 3429,\n 3496\n ]\n },\n \"properties\": {\n \"_id\": \"4\",\n \"_index\": \"museums\",\n \"name\": \"Amsterdam Centre for Architecture\",\n \"price\": 0\n },\n \"type\": 1\n }\n ]\n },\n \"aggs\": {\n \"extent\": 4096,\n \"version\": 2,\n \"features\": [\n {\n \"geometry\": {\n \"type\": \"Polygon\",\n \"coordinates\": [\n [\n [\n 3072,\n 3072\n ],\n [\n 4096,\n 3072\n ],\n [\n 4096,\n 4096\n ],\n [\n 3072,\n 4096\n ],\n [\n 3072,\n 3072\n ]\n ]\n ]\n },\n \"properties\": {\n \"_count\": 3,\n \"max_price.value\": 1750.0,\n \"min_price.value\": 0.0,\n \"avg_price.value\": 1133.3333333333333\n },\n \"type\": 3\n }\n ]\n },\n \"meta\": {\n \"extent\": 4096,\n \"version\": 2,\n \"features\": [\n {\n \"geometry\": {\n \"type\": \"Polygon\",\n \"coordinates\": [\n [\n [\n 0,\n 0\n ],\n [\n 4096,\n 0\n ],\n [\n 4096,\n 4096\n ],\n [\n 0,\n 4096\n ],\n [\n 0,\n 0\n ]\n ]\n ]\n },\n \"properties\": {\n \"_shards.failed\": 0,\n \"_shards.skipped\": 0,\n \"_shards.successful\": 1,\n \"_shards.total\": 1,\n \"aggregations._count.avg\": 3.0,\n \"aggregations._count.count\": 1,\n \"aggregations._count.max\": 3.0,\n \"aggregations._count.min\": 3.0,\n \"aggregations._count.sum\": 3.0,\n \"aggregations.avg_price.avg\": 1133.3333333333333,\n \"aggregations.avg_price.count\": 1,\n 
\"aggregations.avg_price.max\": 1133.3333333333333,\n \"aggregations.avg_price.min\": 1133.3333333333333,\n \"aggregations.avg_price.sum\": 1133.3333333333333,\n \"aggregations.max_price.avg\": 1750.0,\n \"aggregations.max_price.count\": 1,\n \"aggregations.max_price.max\": 1750.0,\n \"aggregations.max_price.min\": 1750.0,\n \"aggregations.max_price.sum\": 1750.0,\n \"aggregations.min_price.avg\": 0.0,\n \"aggregations.min_price.count\": 1,\n \"aggregations.min_price.max\": 0.0,\n \"aggregations.min_price.min\": 0.0,\n \"aggregations.min_price.sum\": 0.0,\n \"hits.max_score\": 0.0,\n \"hits.total.relation\": \"eq\",\n \"hits.total.value\": 3,\n \"timed_out\": false,\n \"took\": 2\n },\n \"type\": 3\n }\n ]\n }\n}" + } } } } @@ -97041,6 +98953,12 @@ "shards", "indices" ] + }, + "examples": { + "SearchShardsResponseExample1": { + "description": "An abbreviated response from `GET /my-index-000001/_search_shards`.", + "value": "{\n \"nodes\": {},\n \"indices\": {\n \"my-index-000001\": { }\n },\n \"shards\": [\n [\n {\n \"index\": \"my-index-000001\",\n \"node\": \"JklnKbD7Tyqi9TP3_Q_tBg\",\n \"relocating_node\": null,\n \"primary\": true,\n \"shard\": 0,\n \"state\": \"STARTED\",\n \"allocation_id\": {\"id\":\"0TvkCyF7TAmM1wHP4a42-A\"},\n \"relocation_failure_info\" : {\n \"failed_attempts\" : 0\n }\n }\n ],\n [\n {\n \"index\": \"my-index-000001\",\n \"node\": \"JklnKbD7Tyqi9TP3_Q_tBg\",\n \"relocating_node\": null,\n \"primary\": true,\n \"shard\": 1,\n \"state\": \"STARTED\",\n \"allocation_id\": {\"id\":\"fMju3hd1QHWmWrIgFnI4Ww\"},\n \"relocation_failure_info\" : {\n \"failed_attempts\" : 0\n }\n }\n ],\n [\n {\n \"index\": \"my-index-000001\",\n \"node\": \"JklnKbD7Tyqi9TP3_Q_tBg\",\n \"relocating_node\": null,\n \"primary\": true,\n \"shard\": 2,\n \"state\": \"STARTED\",\n \"allocation_id\": {\"id\":\"Nwl0wbMBTHCWjEEbGYGapg\"},\n \"relocation_failure_info\" : {\n \"failed_attempts\" : 0\n }\n }\n ],\n [\n {\n \"index\": \"my-index-000001\",\n \"node\": \"JklnKbD7Tyqi9TP3_Q_tBg\",\n \"relocating_node\": null,\n \"primary\": true,\n \"shard\": 3,\n \"state\": \"STARTED\",\n \"allocation_id\": {\"id\":\"bU_KLGJISbW0RejwnwDPKw\"},\n \"relocation_failure_info\" : {\n \"failed_attempts\" : 0\n }\n }\n ],\n [\n {\n \"index\": \"my-index-000001\",\n \"node\": \"JklnKbD7Tyqi9TP3_Q_tBg\",\n \"relocating_node\": null,\n \"primary\": true,\n \"shard\": 4,\n \"state\": \"STARTED\",\n \"allocation_id\": {\"id\":\"DMs7_giNSwmdqVukF7UydA\"},\n \"relocation_failure_info\" : {\n \"failed_attempts\" : 0\n }\n }\n ]\n ]\n }" + } } } } @@ -97134,6 +99052,12 @@ "required": [ "nodes" ] + }, + "examples": { + "ResponseExample1": { + "description": "A successful response from `GET /_searchable_snapshots/cache/stats`.", + "value": "{\n \"nodes\" : {\n \"eerrtBMtQEisohZzxBLUSw\" : {\n \"shared_cache\" : {\n \"reads\" : 6051,\n \"bytes_read_in_bytes\" : 5448829,\n \"writes\" : 37,\n \"bytes_written_in_bytes\" : 1208320,\n \"evictions\" : 5,\n \"num_regions\" : 65536,\n \"size_in_bytes\" : 1099511627776,\n \"region_size_in_bytes\" : 16777216\n }\n }\n }\n}" + } } } } @@ -97212,6 +99136,12 @@ "name", "encoded" ] + }, + "examples": { + "SecurityCreateApiKeyResponseExample1": { + "description": "A successful response from `POST /_security/api_key`.", + "value": "{\n \"id\": \"VuaCfGcBCdbkQm-e5aOx\", \n \"name\": \"my-api-key\",\n \"expiration\": 1544068612110, \n \"api_key\": \"ui2lp2axTNmsyakw9tvNnw\", \n \"encoded\": \"VnVhQ2ZHY0JDZGJrUW0tZTVhT3g6dWkybHAyYXhUTm1zeWFrdzl0dk5udw==\" \n}" + } } } } @@ -97234,6 
+99164,12 @@ "created", "token" ] + }, + "examples": { + "CreateServiceTokenRequestExample1": { + "description": "A successful response from `POST /_security/service/elastic/fleet-server/credential/token/token1`. The response includes the service account token, its name, and its secret value as a bearer token.\n", + "value": "{\n \"created\": true,\n \"token\": {\n \"name\": \"token1\",\n \"value\": \"AAEAAWVsYXN0aWM...vZmxlZXQtc2VydmVyL3Rva2VuMTo3TFdaSDZ\" \n }\n}" + } } } } @@ -97290,6 +99226,12 @@ "$ref": "#/components/schemas/security.put_privileges:Actions" } } + }, + "examples": { + "SecurityGetPrivilegesResponseExample1": { + "description": "A successful response from `GET /_security/privilege/myapp/read`. The response contains information about the `read` privilege for the `myapp` application.\n", + "value": "{\n \"myapp\": {\n \"read\": {\n \"application\": \"myapp\",\n \"name\": \"read\",\n \"actions\": [\n \"data:read/*\",\n \"action:login\"\n ],\n \"metadata\": {\n \"description\": \"Read access to myapp\"\n }\n }\n }\n}" + } } } } @@ -97303,6 +99245,12 @@ "additionalProperties": { "$ref": "#/components/schemas/security.get_role:Role" } + }, + "examples": { + "SecurityGetRoleResponseExample1": { + "description": "A successful response from `GET /_security/role/my_admin_role`. The response contains information about the `my_admin_role` role in the native realm.\n", + "value": "{\n \"my_admin_role\": {\n \"description\": \"Grants full access to all management features within the cluster.\",\n \"cluster\" : [ \"all\" ],\n \"indices\" : [\n {\n \"names\" : [ \"index1\", \"index2\" ],\n \"privileges\" : [ \"all\" ],\n \"allow_restricted_indices\" : false,\n \"field_security\" : {\n \"grant\" : [ \"title\", \"body\" ]}\n }\n ],\n \"applications\" : [ ],\n \"run_as\" : [ \"other_user\" ],\n \"metadata\" : {\n \"version\" : 1\n },\n \"transient_metadata\": {\n \"enabled\": true\n }\n }\n}" + } } } } @@ -97316,6 +99264,12 @@ "additionalProperties": { "$ref": "#/components/schemas/security._types:RoleMapping" } + }, + "examples": { + "SecurityGetRoleMappingResponseExample1": { + "description": "A successful response from `GET /_security/role_mapping/mapping1`.", + "value": "{\n \"mapping1\": {\n \"enabled\": true,\n \"roles\": [\n \"user\"\n ],\n \"rules\": {\n \"field\": {\n \"username\": \"*\"\n }\n },\n \"metadata\": {}\n }\n}" + } } } } @@ -97329,6 +99283,12 @@ "additionalProperties": { "$ref": "#/components/schemas/security.get_service_accounts:RoleDescriptorWrapper" } + }, + "examples": { + "GetServiceAccountsResponseExample1": { + "description": "A successful response from `GET /_security/service/elastic/fleet-server`. 
The response contains information about the `elastic/fleet-server` service account.\n", + "value": "{\n \"elastic/fleet-server\": {\n \"role_descriptor\": {\n \"cluster\": [\n \"monitor\",\n \"manage_own_api_key\",\n \"read_fleet_secrets\"\n ],\n \"indices\": [\n {\n \"names\": [\n \"logs-*\",\n \"metrics-*\",\n \"traces-*\",\n \".logs-endpoint.diagnostic.collection-*\",\n \".logs-endpoint.action.responses-*\",\n \".logs-endpoint.heartbeat-*\"\n ],\n \"privileges\": [\n \"write\",\n \"create_index\",\n \"auto_configure\"\n ],\n \"allow_restricted_indices\": false\n },\n {\n \"names\": [\n \"profiling-*\"\n ],\n \"privileges\": [\n \"read\",\n \"write\"\n ],\n \"allow_restricted_indices\": false\n },\n {\n \"names\": [\n \"traces-apm.sampled-*\"\n ],\n \"privileges\": [\n \"read\",\n \"monitor\",\n \"maintenance\"\n ],\n \"allow_restricted_indices\": false\n },\n {\n \"names\": [\n \".fleet-secrets*\"\n ],\n \"privileges\": [\n \"read\"\n ],\n \"allow_restricted_indices\": true\n },\n {\n \"names\": [\n \".fleet-actions*\"\n ],\n \"privileges\": [\n \"read\",\n \"write\",\n \"monitor\",\n \"create_index\",\n \"auto_configure\",\n \"maintenance\"\n ],\n \"allow_restricted_indices\": true\n },\n {\n \"names\": [\n \".fleet-agents*\"\n ],\n \"privileges\": [\n \"read\",\n \"write\",\n \"monitor\",\n \"create_index\",\n \"auto_configure\",\n \"maintenance\"\n ],\n \"allow_restricted_indices\": true\n },\n {\n \"names\": [\n \".fleet-artifacts*\"\n ],\n \"privileges\": [\n \"read\",\n \"write\",\n \"monitor\",\n \"create_index\",\n \"auto_configure\",\n \"maintenance\"\n ],\n \"allow_restricted_indices\": true\n },\n {\n \"names\": [\n \".fleet-enrollment-api-keys*\"\n ],\n \"privileges\": [\n \"read\",\n \"write\",\n \"monitor\",\n \"create_index\",\n \"auto_configure\",\n \"maintenance\"\n ],\n \"allow_restricted_indices\": true\n },\n {\n \"names\": [\n \".fleet-policies*\"\n ],\n \"privileges\": [\n \"read\",\n \"write\",\n \"monitor\",\n \"create_index\",\n \"auto_configure\",\n \"maintenance\"\n ],\n \"allow_restricted_indices\": true\n },\n {\n \"names\": [\n \".fleet-policies-leader*\"\n ],\n \"privileges\": [\n \"read\",\n \"write\",\n \"monitor\",\n \"create_index\",\n \"auto_configure\",\n \"maintenance\"\n ],\n \"allow_restricted_indices\": true\n },\n {\n \"names\": [\n \".fleet-servers*\"\n ],\n \"privileges\": [\n \"read\",\n \"write\",\n \"monitor\",\n \"create_index\",\n \"auto_configure\",\n \"maintenance\"\n ],\n \"allow_restricted_indices\": true\n },\n {\n \"names\": [\n \".fleet-fileds*\"\n ],\n \"privileges\": [\n \"read\",\n \"write\",\n \"monitor\",\n \"create_index\",\n \"auto_configure\",\n \"maintenance\"\n ],\n \"allow_restricted_indices\": true\n },\n {\n \"names\": [\n \"synthetics-*\"\n ],\n \"privileges\": [\n \"read\",\n \"write\",\n \"create_index\",\n \"auto_configure\"\n ],\n \"allow_restricted_indices\": false\n }\n ],\n \"applications\": [\n {\n \"application\": \"kibana-*\",\n \"privileges\": [\n \"reserved_fleet-setup\"\n ],\n \"resources\": [\n \"*\"\n ]\n }\n ],\n \"run_as\": [],\n \"metadata\": {},\n \"transient_metadata\": {\n \"enabled\": true\n }\n }\n }\n}" + } } } } @@ -97342,6 +99302,12 @@ "additionalProperties": { "$ref": "#/components/schemas/security._types:User" } + }, + "examples": { + "SecurityGetUserResponseExample1": { + "description": "A successful response from `GET /_security/user/jacknich?with_profile_uid=true`. 
It includes the user `profile_uid` as part of the response.\n", + "value": "{\n \"jacknich\": {\n \"username\": \"jacknich\",\n \"roles\": [\n \"admin\", \"other_role1\"\n ],\n \"full_name\": \"Jack Nicholson\",\n \"email\": \"jacknich@example.com\",\n \"metadata\": { \"intelligence\" : 7 },\n \"enabled\": true,\n \"profile_uid\": \"u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0\"\n }\n}" + } } } } @@ -97382,6 +99348,12 @@ "index", "username" ] + }, + "examples": { + "SecurityHasPrivilegesResponseExample1": { + "description": "A successful response from `GET /_security/user/_has_privileges`, which lists the privileges for the `rdeniro` user.", + "value": "{\n \"username\": \"rdeniro\",\n \"has_all_requested\" : false,\n \"cluster\" : {\n \"monitor\" : true,\n \"manage\" : false\n },\n \"index\" : {\n \"suppliers\" : {\n \"read\" : true\n },\n \"products\" : {\n \"read\" : true\n },\n \"inventory\" : {\n \"read\" : true,\n \"write\" : false\n }\n },\n \"application\" : {\n \"inventory_manager\" : {\n \"product/1852563\" : {\n \"read\": false,\n \"data:write/inventory\": false\n }\n }\n }\n}" + } } } } @@ -97407,6 +99379,12 @@ "required": [ "has_privilege_uids" ] + }, + "examples": { + "ResponseExample1": { + "description": "A response from `POST /_security/profile/_has_privileges` that indicates only one of the three users has all the privileges and one of them is not found.\n", + "value": "{\n \"has_privilege_uids\": [\"u_rzRnxDgEHIH0GOUoFkZr5Y27YUwSk19Joiq=g4OCxxB_1\"],\n \"errors\": {\n \"count\": 1,\n \"details\": {\n \"u_does-not-exist_0\": {\n \"type\": \"resource_not_found_exception\",\n \"reason\": \"profile document not found\"\n }\n }\n }\n}" + } } } } @@ -97423,6 +99401,16 @@ "$ref": "#/components/schemas/security._types:CreatedStatus" } } + }, + "examples": { + "SecurityPutPrivilegesResponseExample1": { + "description": "A successful response from `PUT /_security/privilege`.", + "value": "{\n \"myapp\": {\n \"read\": {\n \"created\": true \n }\n }\n}" + }, + "SecurityPutPrivilegesResponseExample2": { + "description": "A successful response from `PUT /_security/privilege`. The `created` property indicates whether the privileges have been created or updated.\n", + "value": "{\n \"app02\": {\n \"all\": {\n \"created\": true\n }\n },\n \"app01\": {\n \"read\": {\n \"created\": true\n },\n \"write\": {\n \"created\": true\n }\n }\n}" + } } } } @@ -97441,6 +99429,12 @@ "required": [ "role" ] + }, + "examples": { + "SecurityPutRoleResponseExample1": { + "description": "A successful response from `POST /_security/role/my_admin_role`.", + "value": "{\n \"role\": {\n \"created\": true \n }\n}" + } } } } @@ -97462,6 +99456,12 @@ "required": [ "role_mapping" ] + }, + "examples": { + "SecurityPutRoleMappingResponseExample1": { + "description": "A successful response from `POST /_security/role_mapping/mapping1`.", + "value": "{\n \"role_mapping\" : {\n \"created\" : true \n }\n}" + } } } } @@ -97481,6 +99481,12 @@ "required": [ "created" ] + }, + "examples": { + "SecurityPutUserResponseExample1": { + "description": "A successful response from `POST /_security/user/jacknich`. When an existing user is updated, `created` is set to `false`.\n", + "value": "{\n \"created\": true \n}" + } } } } @@ -97520,6 +99526,23 @@ "count", "api_keys" ] + }, + "examples": { + "QueryApiKeysResponseExample1": { + "summary": "Query API keys by ID", + "description": "A successful response from `GET /_security/_query/api_key?with_limited_by=true`. 
The `limited_by` details are the owner user's permissions associated with the API key. It is a point-in-time snapshot captured at creation and subsequent updates. An API key's effective permissions are an intersection of its assigned privileges and the owner user's permissions.\n", + "value": "{\n \"api_keys\": [\n {\n \"id\": \"VuaCfGcBCdbkQm-e5aOx\",\n \"name\": \"application-key-1\",\n \"creation\": 1548550550158,\n \"expiration\": 1548551550158,\n \"invalidated\": false,\n \"username\": \"myuser\",\n \"realm\": \"native1\",\n \"realm_type\": \"native\",\n \"metadata\": {\n \"application\": \"my-application\"\n },\n \"role_descriptors\": { },\n \"limited_by\": [ \n {\n \"role-power-user\": {\n \"cluster\": [\n \"monitor\"\n ],\n \"indices\": [\n {\n \"names\": [\n \"*\"\n ],\n \"privileges\": [\n \"read\"\n ],\n \"allow_restricted_indices\": false\n }\n ],\n \"applications\": [ ],\n \"run_as\": [ ],\n \"metadata\": { },\n \"transient_metadata\": {\n \"enabled\": true\n }\n }\n }\n ]\n }\n ]\n}" + }, + "QueryApiKeysResponseExample2": { + "summary": "Query API keys with pagination", + "description": "An abbreviated response from `GET /_security/_query/api_key` that contains a list of matched API keys along with their sort values. The first sort value is creation time, which is displayed in `date_time` format. The second sort value is the API key name.\n", + "value": "{\n \"total\": 100,\n \"count\": 10,\n \"api_keys\": [\n {\n \"id\": \"CLXgVnsBOGkf8IyjcXU7\",\n \"name\": \"app1-key-79\",\n \"creation\": 1629250154811,\n \"invalidated\": false,\n \"username\": \"org-admin-user\",\n \"realm\": \"native1\",\n \"metadata\": {\n \"environment\": \"production\"\n },\n \"role_descriptors\": { },\n \"_sort\": [\n \"2021-08-18T01:29:14.811Z\", \n \"app1-key-79\" \n ]\n },\n {\n \"id\": \"BrXgVnsBOGkf8IyjbXVB\",\n \"name\": \"app1-key-78\",\n \"creation\": 1629250153794,\n \"invalidated\": false,\n \"username\": \"org-admin-user\",\n \"realm\": \"native1\",\n \"metadata\": {\n \"environment\": \"production\"\n },\n \"role_descriptors\": { },\n \"_sort\": [\n \"2021-08-18T01:29:13.794Z\",\n \"app1-key-78\"\n ]\n }\n ]\n}" + }, + "QueryApiKeysResponseExample3": { + "summary": "Query all API keys", + "description": "A successful response from `GET /_security/_query/api_key`. It includes the role descriptors that are assigned to each API key when it was created or last updated. Note that an API key's effective permissions are an intersection of its assigned privileges and the point-in-time snapshot of the owner user's permissions. 
An empty role descriptors object means the API key inherits the owner user's permissions.\n", + "value": "{\n \"total\": 3,\n \"count\": 3,\n \"api_keys\": [ \n {\n \"id\": \"nkvrGXsB8w290t56q3Rg\",\n \"name\": \"my-api-key-1\",\n \"creation\": 1628227480421,\n \"expiration\": 1629091480421,\n \"invalidated\": false,\n \"username\": \"elastic\",\n \"realm\": \"reserved\",\n \"realm_type\": \"reserved\",\n \"metadata\": {\n \"letter\": \"a\"\n },\n \"role_descriptors\": { \n \"role-a\": {\n \"cluster\": [\n \"monitor\"\n ],\n \"indices\": [\n {\n \"names\": [\n \"index-a\"\n ],\n \"privileges\": [\n \"read\"\n ],\n \"allow_restricted_indices\": false\n }\n ],\n \"applications\": [ ],\n \"run_as\": [ ],\n \"metadata\": { },\n \"transient_metadata\": {\n \"enabled\": true\n }\n }\n }\n },\n {\n \"id\": \"oEvrGXsB8w290t5683TI\",\n \"name\": \"my-api-key-2\",\n \"creation\": 1628227498953,\n \"expiration\": 1628313898953,\n \"invalidated\": false,\n \"username\": \"elastic\",\n \"realm\": \"reserved\",\n \"metadata\": {\n \"letter\": \"b\"\n },\n \"role_descriptors\": { } \n }\n ]\n}" + } } } } @@ -97552,6 +99575,18 @@ "count", "roles" ] + }, + "examples": { + "QueryRolesResponseExample1": { + "summary": "Query roles by name", + "description": "A successful response from `POST /_security/_query/role`. It returns a JSON structure that contains the information retrieved for one or more roles.\n", + "value": "{\n \"total\": 2,\n \"count\": 2,\n \"roles\": [ \n {\n \"name\" : \"my_admin_role\",\n \"cluster\" : [\n \"all\"\n ],\n \"indices\" : [\n {\n \"names\" : [\n \"index1\",\n \"index2\"\n ],\n \"privileges\" : [\n \"all\"\n ],\n \"field_security\" : {\n \"grant\" : [\n \"title\",\n \"body\"\n ]\n },\n \"allow_restricted_indices\" : false\n }\n ],\n \"applications\" : [ ],\n \"run_as\" : [\n \"other_user\"\n ],\n \"metadata\" : {\n \"version\" : 1\n },\n \"transient_metadata\" : {\n \"enabled\" : true\n },\n \"description\" : \"Grants full access to all management features within the cluster.\",\n \"_sort\" : [\n \"my_admin_role\"\n ]\n },\n {\n \"name\" : \"my_user_role\",\n \"cluster\" : [ ],\n \"indices\" : [\n {\n \"names\" : [\n \"index1\",\n \"index2\"\n ],\n \"privileges\" : [\n \"all\"\n ],\n \"field_security\" : {\n \"grant\" : [\n \"title\",\n \"body\"\n ]\n },\n \"allow_restricted_indices\" : false\n }\n ],\n \"applications\" : [ ],\n \"run_as\" : [ ],\n \"metadata\" : {\n \"version\" : 1\n },\n \"transient_metadata\" : {\n \"enabled\" : true\n },\n \"description\" : \"Grants user access to some indices.\",\n \"_sort\" : [\n \"my_user_role\"\n ]\n }\n ]\n}" + }, + "QueryRolesResponseExample2": { + "summary": "Query roles by description", + "description": "A successful response from `POST /_security/_query/role`.\n", + "value": "{\n \"total\": 2,\n \"count\": 1,\n \"roles\": [\n {\n \"name\" : \"my_user_role\",\n \"cluster\" : [ ],\n \"indices\" : [\n {\n \"names\" : [\n \"index1\",\n \"index2\"\n ],\n \"privileges\" : [\n \"all\"\n ],\n \"field_security\" : {\n \"grant\" : [\n \"title\",\n \"body\"\n ]\n },\n \"allow_restricted_indices\" : false\n }\n ],\n \"applications\" : [ ],\n \"run_as\" : [ ],\n \"metadata\" : {\n \"version\" : 1\n },\n \"transient_metadata\" : {\n \"enabled\" : true\n },\n \"description\" : \"Grants user access to some indices.\"\n }\n ]\n}" + } } } } @@ -97584,6 +99619,23 @@ "count", "users" ] + }, + "examples": { + "SecurityQueryUserResponseExample1": { + "summary": "Query users by role prefix", + "description": "A successful response from `POST 
/_security/_query/user?with_profile_uid=true` that contains users whose roles are prefixed with `other`. It also includes the user `profile_uid` in the response.\n", + "value": "{\n \"total\": 1,\n \"count\": 1,\n \"users\": [\n {\n \"username\": \"jacknich\",\n \"roles\": [\n \"admin\",\n \"other_role1\"\n ],\n \"full_name\": \"Jack Nicholson\",\n \"email\": \"jacknich@example.com\",\n \"metadata\": {\n \"intelligence\": 7\n },\n \"enabled\": true,\n \"profile_uid\": \"u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0\"\n }\n ]\n}" + }, + "SecurityQueryUserResponseExample2": { + "summary": "Query users with multiple conditions", + "description": "A successful response from `POST /_security/_query/user` that uses a `bool` query to issue complex logical conditions and uses `from`, `size`, and `sort` to help paginate the result. The sort value is `username`.\n", + "value": "{\n \"total\": 5,\n \"count\": 2,\n \"users\": [\n {\n \"username\": \"ray\",\n \"roles\": [\n \"other_role3\"\n ],\n \"full_name\": \"Ray Nicholson\",\n \"email\": \"rayn@example.com\",\n \"metadata\": {\n \"intelligence\": 7\n },\n \"enabled\": true,\n \"_sort\": [\n \"ray\" \n ]\n },\n {\n \"username\": \"lorraine\",\n \"roles\": [\n \"other_role3\"\n ],\n \"full_name\": \"Lorraine Nicholson\",\n \"email\": \"lorraine@example.com\",\n \"metadata\": {\n \"intelligence\": 7\n },\n \"enabled\": true,\n \"_sort\": [\n \"lorraine\"\n ]\n }\n ]\n}" + }, + "SecurityQueryUserResponseExample3": { + "summary": "Query all users", + "description": "A successful response from `GET /_security/_query/user`, which lists all users. It returns a JSON structure that contains the information retrieved from one or more users.\n", + "value": "{\n \"total\": 2,\n \"count\": 2,\n \"users\": [ \n {\n \"username\": \"jacknich\",\n \"roles\": [\n \"admin\",\n \"other_role1\"\n ],\n \"full_name\": \"Jack Nicholson\",\n \"email\": \"jacknich@example.com\",\n \"metadata\": {\n \"intelligence\": 7\n },\n \"enabled\": true\n },\n {\n \"username\": \"sandrakn\",\n \"roles\": [\n \"admin\",\n \"other_role1\"\n ],\n \"full_name\": \"Sandra Knight\",\n \"email\": \"sandrakn@example.com\",\n \"metadata\": {\n \"intelligence\": 7\n },\n \"enabled\": true\n }\n ]\n}" + } } } } @@ -97615,6 +99667,12 @@ "took", "profiles" ] + }, + "examples": { + "ResponseExample1": { + "description": "A successful response from `GET /_security/saml/metadata/saml1`. It contains the SAML metadata that was generated for the SAML realm as an XML string.\n", + "value": "{\n \"metadata\" : \"\"\n}" + } } } } @@ -97625,6 +99683,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "ResponseExample1": { + "description": "A successful response from `POST /_security/profile/u_P_0BMHgaOK3p7k-PFWUCbw9dQ-UFjt01oWJ_Dp2PmPc_0/_data`, which indicates that the request is acknowledged.\n", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -97646,6 +99710,12 @@ "required": [ "nodes" ] + }, + "examples": { + "ShutdownGetNodeResponseExample1": { + "description": "Get the status of shutdown preparations with `GET /_nodes/USpTGYaBSIKbgSUJR2Z9lg/shutdown`. 
The response shows information about the shutdown preparations, including the status of shard migration, task migration, and plugin cleanup.\n", + "value": "{\n \"nodes\": [\n {\n \"node_id\": \"USpTGYaBSIKbgSUJR2Z9lg\",\n \"type\": \"RESTART\",\n \"reason\": \"Demonstrating how the node shutdown API works\",\n \"shutdown_startedmillis\": 1624406108685,\n \"allocation_delay\": \"10m\",\n \"status\": \"COMPLETE\",\n \"shard_migration\": {\n \"status\": \"COMPLETE\",\n \"shard_migrations_remaining\": 0,\n \"explanation\": \"no shard relocation is necessary for a node restart\"\n },\n \"persistent_tasks\": {\n \"status\": \"COMPLETE\"\n },\n \"plugins\": {\n \"status\": \"COMPLETE\"\n }\n }\n ]\n}" + } } } } @@ -97667,6 +99737,23 @@ "required": [ "docs" ] + }, + "examples": { + "SimulateIngestResponseExample1": { + "summary": "Use an existing pipeline definition", + "description": "A successful response when the simulation uses pipeline definitions that are already in the system.", + "value": "{\n \"docs\": [\n {\n \"doc\": null,\n \"_id\": 123,\n \"_index\": \"my-index\",\n \"_version\": -3,\n \"_source\": {\n \"field1\": \"value1\",\n \"field2\": \"value2\",\n \"foo\": \"bar\"\n },\n \"executed_pipelines\": [\n \"my-pipeline\",\n \"my-final-pipeline\"\n ]\n },\n {\n \"doc\": null,\n \"_id\": 456,\n \"_index\": \"my-index\",\n \"_version\": -3,\n \"_source\": {\n \"field1\": \"value1\",\n \"field2\": \"value2\",\n \"foo\": \"rab\"\n },\n \"executed_pipelines\": [\n \"my-pipeline\",\n \"my-final-pipeline\"\n ]\n }\n ]\n}" + }, + "SimulateIngestResponseExample2": { + "summary": "Use pipeline substitutions", + "description": "A successful response when the simulation uses pipeline substitutions.", + "value": "{\n \"docs\": [\n {\n \"doc\": null,\n \"_id\": 123,\n \"_index\": \"my-index\",\n \"_version\": -3,\n \"_source\": {\n \"field2\": \"value2\",\n \"foo\": \"BAR\"\n },\n \"executed_pipelines\": [\n \"my-pipeline\",\n \"my-final-pipeline\"\n ]\n },\n {\n \"doc\": null,\n \"_id\": 456,\n \"_index\": \"my-index\",\n \"_version\": -3,\n \"_source\": {\n \"field2\": \"value2\",\n \"foo\": \"RAB\"\n },\n \"executed_pipelines\": [\n \"my-pipeline\",\n \"my-final-pipeline\"\n ]\n }\n ]\n}" + }, + "SimulateIngestResponseExample3": { + "summary": "Use pipeline substitutions", + "description": "A successful response when the simulation uses pipeline substitutions.", + "value": "{\n \"docs\": [\n {\n \"doc\": {\n \"_id\": \"123\",\n \"_index\": \"my-index\",\n \"_version\": -3,\n \"_source\": {\n \"foo\": \"foo\"\n },\n \"executed_pipelines\": []\n }\n },\n {\n \"doc\": {\n \"_id\": \"456\",\n \"_index\": \"my-index\",\n \"_version\": -3,\n \"_source\": {\n \"bar\": \"rab\"\n },\n \"executed_pipelines\": []\n }\n }\n ]\n}" + } } } } @@ -97680,6 +99767,12 @@ "additionalProperties": { "$ref": "#/components/schemas/slm._types:SnapshotLifecycle" } + }, + "examples": { + "GetSnapshotLifecycleResponseExample1": { + "description": "A successful response from `GET _slm/policy/daily-snapshots?human`.", + "value": "{\n \"daily-snapshots\": {\n \"version\": 1,\n \"modified_date\": \"2099-05-06T01:30:00.000Z\",\n \"modified_date_millis\": 4081757400000,\n \"policy\" : {\n \"schedule\": \"0 30 1 * * ?\",\n \"name\": \"\",\n \"repository\": \"my_repository\",\n \"config\": {\n \"indices\": [\"data-*\", \"important\"],\n \"ignore_unavailable\": false,\n \"include_global_state\": false\n },\n \"retention\": {\n \"expire_after\": \"30d\",\n \"min_count\": 5,\n \"max_count\": 50\n }\n },\n \"stats\": {\n 
\"policy\": \"daily-snapshots\",\n \"snapshots_taken\": 0,\n \"snapshots_failed\": 0,\n \"snapshots_deleted\": 0,\n \"snapshot_deletion_failures\": 0\n },\n \"next_execution\": \"2099-05-07T01:30:00.000Z\",\n \"next_execution_millis\": 4081843800000\n }\n}" + } } } } @@ -97699,6 +99792,12 @@ "$ref": "#/components/schemas/snapshot._types:SnapshotInfo" } } + }, + "examples": { + "SnapshotCreateResponseExample1": { + "description": "A successful response from `PUT /_snapshot/my_repository/snapshot_2?wait_for_completion=true`.", + "value": "{\n \"snapshot\": {\n \"snapshot\": \"snapshot_2\",\n \"uuid\": \"vdRctLCxSketdKb54xw67g\",\n \"repository\": \"my_repository\",\n \"version_id\": ,\n \"version\": ,\n \"indices\": [],\n \"data_streams\": [],\n \"feature_states\": [],\n \"include_global_state\": false,\n \"metadata\": {\n \"taken_by\": \"user123\",\n \"taken_because\": \"backup before upgrading\"\n },\n \"state\": \"SUCCESS\",\n \"start_time\": \"2020-06-25T14:00:28.850Z\",\n \"start_time_in_millis\": 1593093628850,\n \"end_time\": \"2020-06-25T14:00:28.850Z\",\n \"end_time_in_millis\": 1593094752018,\n \"duration_in_millis\": 0,\n \"failures\": [],\n \"shards\": {\n \"total\": 0,\n \"failed\": 0,\n \"successful\": 0\n }\n }\n}" + } } } } @@ -97722,6 +99821,12 @@ "additionalProperties": { "$ref": "#/components/schemas/snapshot._types:Repository" } + }, + "examples": { + "SnapshotGetRepositoryResponseExample1": { + "description": "A successful response from `GET /_snapshot/my_repository`.", + "value": "{\n \"my_repository\" : {\n \"type\" : \"fs\",\n \"uuid\" : \"0JLknrXbSUiVPuLakHjBrQ\",\n \"settings\" : {\n \"location\" : \"my_backup_location\"\n }\n }\n}" + } } } } @@ -97743,6 +99848,12 @@ "required": [ "snapshots" ] + }, + "examples": { + "SnapshotStatusResponseExample1": { + "description": "A successful response from `GET _snapshot/my_repository/snapshot_2/_status`. 
The response contains detailed status information for `snapshot_2` in the `my_repository` repository.\n", + "value": "{\n \"snapshots\" : [\n {\n \"snapshot\" : \"snapshot_2\",\n \"repository\" : \"my_repository\",\n \"uuid\" : \"lNeQD1SvTQCqqJUMQSwmGg\",\n \"state\" : \"SUCCESS\",\n \"include_global_state\" : false,\n \"shards_stats\" : {\n \"initializing\" : 0,\n \"started\" : 0,\n \"finalizing\" : 0,\n \"done\" : 1,\n \"failed\" : 0,\n \"total\" : 1\n },\n \"stats\" : {\n \"incremental\" : {\n \"file_count\" : 3,\n \"size_in_bytes\" : 5969\n },\n \"total\" : {\n \"file_count\" : 4,\n \"size_in_bytes\" : 6024\n },\n \"start_time_in_millis\" : 1594829326691,\n \"time_in_millis\" : 205\n },\n \"indices\" : {\n \"index_1\" : {\n \"shards_stats\" : {\n \"initializing\" : 0,\n \"started\" : 0,\n \"finalizing\" : 0,\n \"done\" : 1,\n \"failed\" : 0,\n \"total\" : 1\n },\n \"stats\" : {\n \"incremental\" : {\n \"file_count\" : 3,\n \"size_in_bytes\" : 5969\n },\n \"total\" : {\n \"file_count\" : 4,\n \"size_in_bytes\" : 6024\n },\n \"start_time_in_millis\" : 1594829326896,\n \"time_in_millis\" : 0\n },\n \"shards\" : {\n \"0\" : {\n \"stage\" : \"DONE\",\n \"stats\" : {\n \"incremental\" : {\n \"file_count\" : 3,\n \"size_in_bytes\" : 5969\n },\n \"total\" : {\n \"file_count\" : 4,\n \"size_in_bytes\" : 6024\n },\n \"start_time_in_millis\" : 1594829326896,\n \"time_in_millis\" : 0\n }\n }\n }\n }\n }\n }\n ]\n}" + } } } } @@ -97863,6 +99974,12 @@ "terms", "complete" ] + }, + "examples": { + "TermsEnumResponseExample1": { + "description": "A successful response from `POST stackoverflow/_terms_enum`.", + "value": "{\n \"_shards\": {\n \"total\": 1,\n \"successful\": 1,\n \"failed\": 0\n },\n \"terms\": [\n \"kibana\"\n ],\n \"complete\" : true\n}" + } } } } @@ -97902,6 +100019,23 @@ "took", "_version" ] + }, + "examples": { + "TermVectorsResponseExample1": { + "summary": "Return stored term vectors", + "description": "A successful response from `GET /my-index-000001/_termvectors/1`.", + "value": "{\n \"_index\": \"my-index-000001\",\n \"_id\": \"1\",\n \"_version\": 1,\n \"found\": true,\n \"took\": 6,\n \"term_vectors\": {\n \"text\": {\n \"field_statistics\": {\n \"sum_doc_freq\": 4,\n \"doc_count\": 2,\n \"sum_ttf\": 6\n },\n \"terms\": {\n \"test\": {\n \"doc_freq\": 2,\n \"ttf\": 4,\n \"term_freq\": 3,\n \"tokens\": [\n {\n \"position\": 0,\n \"start_offset\": 0,\n \"end_offset\": 4,\n \"payload\": \"d29yZA==\"\n },\n {\n \"position\": 1,\n \"start_offset\": 5,\n \"end_offset\": 9,\n \"payload\": \"d29yZA==\"\n },\n {\n \"position\": 2,\n \"start_offset\": 10,\n \"end_offset\": 14,\n \"payload\": \"d29yZA==\"\n }\n ]\n }\n }\n }\n }\n}" + }, + "TermVectorsResponseExample2": { + "summary": "Per-field analyzer", + "description": "A successful response from `GET /my-index-000001/_termvectors` with `per_field_analyzer` in the request body.", + "value": "{\n \"_index\": \"my-index-000001\",\n \"_version\": 0,\n \"found\": true,\n \"took\": 6,\n \"term_vectors\": {\n \"fullname\": {\n \"field_statistics\": {\n \"sum_doc_freq\": 2,\n \"doc_count\": 4,\n \"sum_ttf\": 4\n },\n \"terms\": {\n \"John Doe\": {\n \"term_freq\": 1,\n \"tokens\": [\n {\n \"position\": 0,\n \"start_offset\": 0,\n \"end_offset\": 8\n }\n ]\n }\n }\n }\n }\n}" + }, + "TermVectorsResponseExample3": { + "summary": "Terms filtering", + "description": "A successful response from `GET /my-index-000001/_termvectors` with a `filter` in the request body.", + "value": "{\n \"_index\": \"imdb\",\n \"_version\": 0,\n \"found\": true,\n 
\"term_vectors\": {\n \"plot\": {\n \"field_statistics\": {\n \"sum_doc_freq\": 3384269,\n \"doc_count\": 176214,\n \"sum_ttf\": 3753460\n },\n \"terms\": {\n \"armored\": {\n \"doc_freq\": 27,\n \"ttf\": 27,\n \"term_freq\": 1,\n \"score\": 9.74725\n },\n \"industrialist\": {\n \"doc_freq\": 88,\n \"ttf\": 88,\n \"term_freq\": 1,\n \"score\": 8.590818\n },\n \"stark\": {\n \"doc_freq\": 44,\n \"ttf\": 47,\n \"term_freq\": 1,\n \"score\": 9.272792\n }\n }\n }\n }\n}" + } } } } @@ -97979,6 +100113,12 @@ "num_messages_analyzed", "sample_start" ] + }, + "examples": { + "FindMessageStructureResponseExample1": { + "description": "A successful response from `POST _text_structure/find_message_structure`.", + "value": "{\n \"num_lines_analyzed\" : 22,\n \"num_messages_analyzed\" : 22,\n \"sample_start\" : \"[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128\\n[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-url]\\n\",\n \"charset\" : \"UTF-8\",\n \"format\" : \"semi_structured_text\",\n \"multiline_start_pattern\" : \"^\\\\[\\\\b\\\\d{4}-\\\\d{2}-\\\\d{2}[T ]\\\\d{2}:\\\\d{2}\",\n \"grok_pattern\" : \"\\\\[%{TIMESTAMP_ISO8601:timestamp}\\\\]\\\\[%{LOGLEVEL:loglevel} \\\\]\\\\[.*\",\n \"ecs_compatibility\" : \"disabled\",\n \"timestamp_field\" : \"timestamp\",\n \"joda_timestamp_formats\" : [\n \"ISO8601\"\n ],\n \"java_timestamp_formats\" : [\n \"ISO8601\"\n ],\n \"need_client_timezone\" : true,\n \"mappings\" : {\n \"properties\" : {\n \"@timestamp\" : {\n \"type\" : \"date\"\n },\n \"loglevel\" : {\n \"type\" : \"keyword\"\n },\n \"message\" : {\n \"type\" : \"text\"\n }\n }\n },\n \"ingest_pipeline\" : {\n \"description\" : \"Ingest pipeline created by text structure finder\",\n \"processors\" : [\n {\n \"grok\" : {\n \"field\" : \"message\",\n \"patterns\" : [\n \"\\\\[%{TIMESTAMP_ISO8601:timestamp}\\\\]\\\\[%{LOGLEVEL:loglevel} \\\\]\\\\[.*\"\n ],\n \"ecs_compatibility\" : \"disabled\"\n }\n },\n {\n \"date\" : {\n \"field\" : \"timestamp\",\n \"timezone\" : \"{{ event.timezone }}\",\n \"formats\" : [\n \"ISO8601\"\n ]\n }\n },\n {\n \"remove\" : {\n \"field\" : \"timestamp\"\n }\n }\n ]\n },\n \"field_stats\" : {\n \"loglevel\" : {\n \"count\" : 22,\n \"cardinality\" : 1,\n \"top_hits\" : [\n {\n \"value\" : \"INFO\",\n \"count\" : 22\n }\n ]\n },\n \"message\" : {\n \"count\" : 22,\n \"cardinality\" : 22,\n \"top_hits\" : [\n {\n \"value\" : \"[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128\",\n \"count\" : 1\n },\n {\n \"value\" : \"[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-url]\",\n \"count\" : 1\n },\n {\n \"value\" : \"[2024-03-05T10:52:41,042][INFO ][o.e.p.PluginsService ] [laptop] loaded module [rest-root]\",\n \"count\" : 1\n },\n {\n \"value\" : \"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [ingest-user-agent]\",\n \"count\" : 1\n },\n {\n \"value\" : \"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-core]\",\n \"count\" : 1\n },\n {\n \"value\" : \"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-redact]\",\n \"count\" : 1\n },\n {\n \"value\" : \"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-painless]]\",\n \"count\" : 1\n },\n {\n \"value\" : 
\"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-s3]\",\n \"count\" : 1\n },\n {\n \"value\" : \"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-analytics]\",\n \"count\" : 1\n },\n {\n \"value\" : \"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-autoscaling]\",\n \"count\" : 1\n }\n ]\n },\n \"timestamp\" : {\n \"count\" : 22,\n \"cardinality\" : 14,\n \"earliest\" : \"2024-03-05T10:52:36,256\",\n \"latest\" : \"2024-03-05T10:52:49,199\",\n \"top_hits\" : [\n {\n \"value\" : \"2024-03-05T10:52:41,044\",\n \"count\" : 6\n },\n {\n \"value\" : \"2024-03-05T10:52:41,043\",\n \"count\" : 3\n },\n {\n \"value\" : \"2024-03-05T10:52:41,059\",\n \"count\" : 2\n },\n {\n \"value\" : \"2024-03-05T10:52:36,256\",\n \"count\" : 1\n },\n {\n \"value\" : \"2024-03-05T10:52:41,038\",\n \"count\" : 1\n },\n {\n \"value\" : \"2024-03-05T10:52:41,042\",\n \"count\" : 1\n },\n {\n \"value\" : \"2024-03-05T10:52:43,291\",\n \"count\" : 1\n },\n {\n \"value\" : \"2024-03-05T10:52:46,098\",\n \"count\" : 1\n },\n {\n \"value\" : \"2024-03-05T10:52:47,227\",\n \"count\" : 1\n },\n {\n \"value\" : \"2024-03-05T10:52:47,259\",\n \"count\" : 1\n }\n ]\n }\n }\n}" + } } } } @@ -98000,6 +100140,12 @@ "required": [ "matches" ] + }, + "examples": { + "TestGrokPatternResponseExample1": { + "description": "A successful response from `GET _text_structure/test_grok_pattern`.", + "value": "{\n \"matches\": [\n {\n \"matched\": true,\n \"fields\": {\n \"first_name\": [\n {\n \"match\": \"John\",\n \"offset\": 6,\n \"length\": 4\n }\n ],\n \"last_name\": [\n {\n \"match\": \"Doe\",\n \"offset\": 11,\n \"length\": 3\n }\n ]\n }\n },\n {\n \"matched\": false\n }\n ]\n}" + } } } } @@ -98025,6 +100171,12 @@ "count", "transforms" ] + }, + "examples": { + "GetTransformResponseExample1": { + "description": "A successful response that contains configuration information for a transform.", + "value": "{\n \"count\": 1,\n \"transforms\": [\n {\n \"id\": \"ecommerce_transform1\",\n \"authorization\": {\n \"roles\": [\n \"superuser\"\n ]\n },\n \"version\": \"8.4.0\",\n \"create_time\": 1656023416565,\n \"source\": {\n \"index\": [\n \"kibana_sample_data_ecommerce\"\n ],\n \"query\": {\n \"term\": {\n \"geoip.continent_name\": {\n \"value\": \"Asia\"\n }\n }\n }\n },\n \"dest\": {\n \"index\": \"kibana_sample_data_ecommerce_transform1\",\n \"pipeline\": \"add_timestamp_pipeline\"\n },\n \"frequency\": \"5m\",\n \"sync\": {\n \"time\": {\n \"field\": \"order_date\",\n \"delay\": \"60s\"\n }\n },\n \"pivot\": {\n \"group_by\": {\n \"customer_id\": {\n \"terms\": {\n \"field\": \"customer_id\"\n }\n }\n },\n \"aggregations\": {\n \"max_price\": {\n \"max\": {\n \"field\": \"taxful_total_price\"\n }\n }\n }\n },\n \"description\": \"Maximum priced ecommerce data by customer_id in Asia\",\n \"settings\": {},\n \"retention_policy\": {\n \"time\": {\n \"field\": \"order_date\",\n \"max_age\": \"30d\"\n }\n }\n }\n ]\n}" + } } } } @@ -98050,6 +100202,12 @@ "generated_dest_index", "preview" ] + }, + "examples": { + "PreviewTransformResponseExample1": { + "description": "An abbreviated response from `POST _transform/_preview` that contains a preview a transform that uses the pivot method.", + "value": "{\n \"preview\": [\n {\n \"max_price\": 171,\n \"customer_id\": \"10\"\n },\n {\n \"max_price\": 233,\n \"customer_id\": \"11\"\n },\n {\n \"max_price\": 200,\n \"customer_id\": \"12\"\n },\n {\n \"max_price\": 301,\n 
\"customer_id\": \"13\"\n },\n {\n \"max_price\": 176,\n \"customer_id\": \"14\"\n },\n {\n \"max_price\": 2250,\n \"customer_id\": \"15\"\n },\n {\n \"max_price\": 170,\n \"customer_id\": \"16\"\n },\n {\n \"max_price\": 243,\n \"customer_id\": \"17\"\n },\n {\n \"max_price\": 154,\n \"customer_id\": \"18\"\n },\n {\n \"max_price\": 393,\n \"customer_id\": \"19\"\n },\n {\n \"max_price\": 165,\n \"customer_id\": \"20\"\n },\n {\n \"max_price\": 115,\n \"customer_id\": \"21\"\n },\n {\n \"max_price\": 192,\n \"customer_id\": \"22\"\n },\n {\n \"max_price\": 169,\n \"customer_id\": \"23\"\n },\n {\n \"max_price\": 230,\n \"customer_id\": \"24\"\n },\n {\n \"max_price\": 278,\n \"customer_id\": \"25\"\n },\n {\n \"max_price\": 200,\n \"customer_id\": \"26\"\n },\n {\n \"max_price\": 344,\n \"customer_id\": \"27\"\n },\n {\n \"max_price\": 175,\n \"customer_id\": \"28\"\n },\n {\n \"max_price\": 177,\n \"customer_id\": \"29\"\n },\n {\n \"max_price\": 190,\n \"customer_id\": \"30\"\n },\n {\n \"max_price\": 190,\n \"customer_id\": \"31\"\n },\n {\n \"max_price\": 205,\n \"customer_id\": \"32\"\n },\n {\n \"max_price\": 215,\n \"customer_id\": \"33\"\n },\n {\n \"max_price\": 270,\n \"customer_id\": \"34\"\n },\n {\n \"max_price\": 184,\n \"customer_id\": \"36\"\n },\n {\n \"max_price\": 222,\n \"customer_id\": \"37\"\n },\n {\n \"max_price\": 370,\n \"customer_id\": \"38\"\n },\n {\n \"max_price\": 240,\n \"customer_id\": \"39\"\n },\n {\n \"max_price\": 230,\n \"customer_id\": \"4\"\n },\n {\n \"max_price\": 229,\n \"customer_id\": \"41\"\n },\n {\n \"max_price\": 190,\n \"customer_id\": \"42\"\n },\n {\n \"max_price\": 150,\n \"customer_id\": \"43\"\n },\n {\n \"max_price\": 175,\n \"customer_id\": \"44\"\n },\n {\n \"max_price\": 190,\n \"customer_id\": \"45\"\n },\n {\n \"max_price\": 150,\n \"customer_id\": \"46\"\n },\n {\n \"max_price\": 310,\n \"customer_id\": \"48\"\n },\n {\n \"max_price\": 223,\n \"customer_id\": \"49\"\n },\n {\n \"max_price\": 283,\n \"customer_id\": \"5\"\n },\n {\n \"max_price\": 185,\n \"customer_id\": \"50\"\n },\n {\n \"max_price\": 190,\n \"customer_id\": \"51\"\n },\n {\n \"max_price\": 333,\n \"customer_id\": \"52\"\n },\n {\n \"max_price\": 165,\n \"customer_id\": \"6\"\n },\n {\n \"max_price\": 144,\n \"customer_id\": \"7\"\n },\n {\n \"max_price\": 198,\n \"customer_id\": \"8\"\n },\n {\n \"max_price\": 210,\n \"customer_id\": \"9\"\n }\n ],\n \"generated_dest_index\": {\n \"mappings\": {\n \"_meta\": {\n \"_transform\": {\n \"transform\": \"transform-preview\",\n \"version\": {\n \"created\": \"10.0.0\"\n },\n \"creation_date_in_millis\": 1712948905889\n },\n \"created_by\": \"transform\"\n },\n \"properties\": {\n \"max_price\": {\n \"type\": \"half_float\"\n },\n \"customer_id\": {\n \"type\": \"keyword\"\n }\n }\n },\n \"settings\": {\n \"index\": {\n \"number_of_shards\": \"1\",\n \"auto_expand_replicas\": \"0-1\"\n }\n },\n \"aliases\": {}\n }\n}" + } } } } @@ -98068,6 +100226,12 @@ "required": [ "status" ] + }, + "examples": { + "WatcherAckWatchResponseExample1": { + "description": "A successful response from `POST _watcher/watch/my_watch/_ack`.", + "value": "{\n \"status\": {\n \"state\": {\n \"active\": true,\n \"timestamp\": \"2015-05-26T18:04:27.723Z\"\n },\n \"last_checked\": \"2015-05-26T18:04:27.753Z\",\n \"last_met_condition\": \"2015-05-26T18:04:27.763Z\",\n \"actions\": {\n \"test_index\": {\n \"ack\" : {\n \"timestamp\": \"2015-05-26T18:04:27.713Z\",\n \"state\": \"acked\"\n },\n \"last_execution\" : {\n \"timestamp\": 
\"2015-05-25T18:04:27.733Z\",\n \"successful\": true\n },\n \"last_successful_execution\" : {\n \"timestamp\": \"2015-05-25T18:04:27.773Z\",\n \"successful\": true\n }\n }\n },\n \"execution_state\": \"executed\",\n \"version\": 2\n }\n}" + } } } } @@ -98126,6 +100290,12 @@ "_id", "watch_record" ] + }, + "examples": { + "WatcherExecuteWatchResponseExample1": { + "description": "A successful response from `POST _watcher/watch/my_watch/_execute`.\n", + "value": "{\n \"_id\": \"my_watch_0-2015-06-02T23:17:55.124Z\", \n \"watch_record\": { \n \"@timestamp\": \"2015-06-02T23:17:55.124Z\",\n \"watch_id\": \"my_watch\",\n \"node\": \"my_node\",\n \"messages\": [],\n \"trigger_event\": {\n \"type\": \"manual\",\n \"triggered_time\": \"2015-06-02T23:17:55.124Z\",\n \"manual\": {\n \"schedule\": {\n \"scheduled_time\": \"2015-06-02T23:17:55.124Z\"\n }\n }\n },\n \"state\": \"executed\",\n \"status\": {\n \"version\": 1,\n \"execution_state\": \"executed\",\n \"state\": {\n \"active\": true,\n \"timestamp\": \"2015-06-02T23:17:55.111Z\"\n },\n \"last_checked\": \"2015-06-02T23:17:55.124Z\",\n \"last_met_condition\": \"2015-06-02T23:17:55.124Z\",\n \"actions\": {\n \"test_index\": {\n \"ack\": {\n \"timestamp\": \"2015-06-02T23:17:55.124Z\",\n \"state\": \"ackable\"\n },\n \"last_execution\": {\n \"timestamp\": \"2015-06-02T23:17:55.124Z\",\n \"successful\": true\n },\n \"last_successful_execution\": {\n \"timestamp\": \"2015-06-02T23:17:55.124Z\",\n \"successful\": true\n }\n }\n }\n },\n \"input\": {\n \"simple\": {\n \"payload\": {\n \"send\": \"yes\"\n }\n }\n },\n \"condition\": {\n \"always\": {}\n },\n \"result\": { \n \"execution_time\": \"2015-06-02T23:17:55.124Z\",\n \"execution_duration\": 12608,\n \"input\": {\n \"type\": \"simple\",\n \"payload\": {\n \"foo\": \"bar\"\n },\n \"status\": \"success\"\n },\n \"condition\": {\n \"type\": \"always\",\n \"met\": true,\n \"status\": \"success\"\n },\n \"actions\": [\n {\n \"id\": \"test_index\",\n \"index\": {\n \"response\": {\n \"index\": \"test\",\n \"version\": 1,\n \"created\": true,\n \"result\": \"created\",\n \"id\": \"AVSHKzPa9zx62AzUzFXY\"\n }\n },\n \"status\": \"success\",\n \"type\": \"index\"\n }\n ]\n },\n \"user\": \"test_admin\" \n }\n}" + } } } } @@ -98187,6 +100357,12 @@ "count", "watches" ] + }, + "examples": { + "WatcherQueryWatchesResponseExample1": { + "description": "A successful response from `GET /_watcher/_query/watches`.", + "value": "{\n \"count\": 1,\n \"watches\": [\n {\n \"_id\": \"my_watch\",\n \"watch\": {\n \"trigger\": {\n \"schedule\": {\n \"hourly\": {\n \"minute\": [\n 0,\n 5\n ]\n }\n }\n },\n \"input\": {\n \"simple\": {\n \"payload\": {\n \"send\": \"yes\"\n }\n }\n },\n \"condition\": {\n \"always\": {}\n },\n \"actions\": {\n \"test_index\": {\n \"index\": {\n \"index\": \"test\"\n }\n }\n }\n },\n \"status\": {\n \"state\": {\n \"active\": true,\n \"timestamp\": \"2015-05-26T18:21:08.630Z\"\n },\n \"actions\": {\n \"test_index\": {\n \"ack\": {\n \"timestamp\": \"2015-05-26T18:21:08.630Z\",\n \"state\": \"awaits_successful_execution\"\n }\n }\n },\n \"version\": -1\n },\n \"_seq_no\": 0,\n \"_primary_term\": 1\n }\n ]\n}" + } } } } @@ -98220,6 +100396,23 @@ "manually_stopped", "stats" ] + }, + "examples": { + "WatcherStatsResponseExample1": { + "summary": "Basic metrics", + "description": "A successful response from `GET _watcher/stats`.", + "value": "{\n \"watcher_state\": \"started\", \n \"watch_count\": 1, \n \"execution_thread_pool\": {\n \"size\": 1000, \n \"max_size\": 1 \n }\n}" + }, + 
"WatcherStatsResponseExample2": { + "summary": "Current watch metrics", + "description": "A successful response from `GET _watcher/stats?metric=current_watches`.", + "value": "{\n \"watcher_state\": \"started\",\n \"watch_count\": 2,\n \"execution_thread_pool\": {\n \"queue_size\": 1000,\n \"max_size\": 20\n },\n \"current_watches\": [ \n {\n \"watch_id\": \"slow_condition\", \n \"watch_record_id\": \"slow_condition_3-2015-05-13T07:42:32.179Z\", \n \"triggered_time\": \"2015-05-12T11:53:51.800Z\", \n \"execution_time\": \"2015-05-13T07:42:32.179Z\", \n \"execution_phase\": \"condition\" \n }\n ]\n}" + }, + "WatcherStatsResponseExample3": { + "summary": "Queued watch metrics", + "description": "An abbreviated response from `GET _watcher/stats/queued_watches`.", + "value": "{\n \"watcher_state\": \"started\",\n \"watch_count\": 10,\n \"execution_thread_pool\": {\n \"queue_size\": 1000,\n \"max_size\": 20\n },\n \"queued_watches\": [ \n {\n \"watch_id\": \"slow_condition4\", \n \"watch_record_id\": \"slow_condition4_223-2015-05-21T11:59:59.811Z\", \n \"triggered_time\": \"2015-05-21T11:59:59.811Z\", \n \"execution_time\": \"2015-05-21T11:59:59.811Z\" \n }\n ]\n}" + } } } } @@ -108122,6 +110315,12 @@ } } } + }, + "examples": { + "AsyncSearchSubmitRequestExample1": { + "description": "Perform a search request asynchronously with `POST /sales*/_async_search?size=0`. It accepts the same parameters and request body as the search API.\n", + "value": "{\n \"sort\": [\n { \"date\": { \"order\": \"asc\" } }\n ],\n \"aggs\": {\n \"sale_date\": {\n \"date_histogram\": {\n \"field\": \"date\",\n \"calendar_interval\": \"1d\"\n }\n }\n }\n}" + } } } } @@ -108144,6 +110343,28 @@ } ] } + }, + "examples": { + "BulkRequestExample1": { + "summary": "Multiple operations", + "description": "Run `POST _bulk` to perform multiple operations.", + "value": "{ \"index\" : { \"_index\" : \"test\", \"_id\" : \"1\" } }\n{ \"field1\" : \"value1\" }\n{ \"delete\" : { \"_index\" : \"test\", \"_id\" : \"2\" } }\n{ \"create\" : { \"_index\" : \"test\", \"_id\" : \"3\" } }\n{ \"field1\" : \"value3\" }\n{ \"update\" : {\"_id\" : \"1\", \"_index\" : \"test\"} }\n{ \"doc\" : {\"field2\" : \"value2\"} }" + }, + "BulkRequestExample2": { + "summary": "Bulk updates", + "description": "When you run `POST _bulk` and use the `update` action, you can use `retry_on_conflict` as a field in the action itself (not in the extra payload line) to specify how many times an update should be retried in the case of a version conflict.\n", + "value": "{ \"update\" : {\"_id\" : \"1\", \"_index\" : \"index1\", \"retry_on_conflict\" : 3} }\n{ \"doc\" : {\"field\" : \"value\"} }\n{ \"update\" : { \"_id\" : \"0\", \"_index\" : \"index1\", \"retry_on_conflict\" : 3} }\n{ \"script\" : { \"source\": \"ctx._source.counter += params.param1\", \"lang\" : \"painless\", \"params\" : {\"param1\" : 1}}, \"upsert\" : {\"counter\" : 1}}\n{ \"update\" : {\"_id\" : \"2\", \"_index\" : \"index1\", \"retry_on_conflict\" : 3} }\n{ \"doc\" : {\"field\" : \"value\"}, \"doc_as_upsert\" : true }\n{ \"update\" : {\"_id\" : \"3\", \"_index\" : \"index1\", \"_source\" : true} }\n{ \"doc\" : {\"field\" : \"value\"} }\n{ \"update\" : {\"_id\" : \"4\", \"_index\" : \"index1\"} }\n{ \"doc\" : {\"field\" : \"value\"}, \"_source\": true}" + }, + "BulkRequestExample3": { + "summary": "Filter for failed operations", + "description": "To return only information about failed operations, run `POST /_bulk?filter_path=items.*.error`.\n", + "value": "{ \"update\": {\"_id\": \"5\", 
\"_index\": \"index1\"} }\n{ \"doc\": {\"my_field\": \"foo\"} }\n{ \"update\": {\"_id\": \"6\", \"_index\": \"index1\"} }\n{ \"doc\": {\"my_field\": \"foo\"} }\n{ \"create\": {\"_id\": \"7\", \"_index\": \"index1\"} }\n{ \"my_field\": \"foo\" }" + }, + "BulkRequestExample4": { + "summary": "Dynamic templates", + "description": "Run `POST /_bulk` to perform a bulk request that consists of index and create actions with the `dynamic_templates` parameter. The bulk request creates two new fields `work_location` and `home_location` with type `geo_point` according to the `dynamic_templates` parameter. However, the `raw_location` field is created using default dynamic mapping rules, as a text field in that case since it is supplied as a string in the JSON document.\n", + "value": "{ \"index\" : { \"_index\" : \"my_index\", \"_id\" : \"1\", \"dynamic_templates\": {\"work_location\": \"geo_point\"}} }\n{ \"field\" : \"value1\", \"work_location\": \"41.12,-71.34\", \"raw_location\": \"41.12,-71.34\"}\n{ \"create\" : { \"_index\" : \"my_index\", \"_id\" : \"2\", \"dynamic_templates\": {\"home_location\": \"geo_point\"}} }\n{ \"field\" : \"value2\", \"home_location\": \"41.12,-71.34\"}" + } } } }, @@ -108159,6 +110380,12 @@ "$ref": "#/components/schemas/_types:ScrollIds" } } + }, + "examples": { + "ClearScrollRequestExample1": { + "description": "Run `DELETE /_search/scroll` to clear the search context and results for a scrolling search.", + "value": "{\n \"scroll_id\": \"DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==\"\n}" + } } } } @@ -108185,6 +110412,12 @@ "type": "number" } } + }, + "examples": { + "ClusterAllocationExplainRequestExample1": { + "description": "Run `GET _cluster/allocation/explain` to get an explanation for a shard's current allocation.", + "value": "{\n \"index\": \"my-index-000001\",\n \"shard\": 0,\n \"primary\": false,\n \"current_node\": \"my-node\"\n}" + } } } } @@ -108212,6 +110445,17 @@ "required": [ "template" ] + }, + "examples": { + "ClusterPutComponentTemplateRequestExample1": { + "summary": "Create a template", + "value": "{\n \"template\": {\n \"settings\": {\n \"number_of_shards\": 1\n },\n \"mappings\": {\n \"_source\": {\n \"enabled\": false\n },\n \"properties\": {\n \"host_name\": {\n \"type\": \"keyword\"\n },\n \"created_at\": {\n \"type\": \"date\",\n \"format\": \"EEE MMM dd HH:mm:ss Z yyyy\"\n }\n }\n }\n }\n}" + }, + "ClusterPutComponentTemplateRequestExample2": { + "summary": "Create a template with aliases", + "description": "You can include index aliases in a component template. 
During index creation, the `{index}` placeholder in the alias name will be replaced with the actual index name that the template gets applied to.\n", + "value": "{\n \"template\": {\n \"settings\": {\n \"number_of_shards\": 1\n },\n \"aliases\": {\n \"alias1\": {},\n \"alias2\": {\n \"filter\": {\n \"term\": {\n \"user.id\": \"kimchy\"\n }\n },\n \"routing\": \"shard-1\"\n },\n \"{index}-alias\": {}\n }\n }\n}" + } } } }, @@ -108242,6 +110486,14 @@ "type": "string" } } + }, + "examples": { + "ConnectorPutRequestExample1": { + "value": "{\n \"index_name\": \"search-google-drive\",\n \"name\": \"My Connector\",\n \"service_type\": \"google_drive\"\n}" + }, + "ConnectorPutRequestExample2": { + "value": "{\n \"index_name\": \"search-google-drive\",\n \"name\": \"My Connector\",\n \"description\": \"My Connector to sync data to Elastic index from Google Drive\",\n \"service_type\": \"google_drive\",\n \"language\": \"english\"\n}" + } } } } @@ -108256,6 +110508,12 @@ "$ref": "#/components/schemas/_types.query_dsl:QueryContainer" } } + }, + "examples": { + "CountRequestExample1": { + "description": "Run `GET /my-index-000001/_count?q=user:kimchy`. Alternatively, run `GET /my-index-000001/_count` with the same query in the request body. Both requests count the number of documents in `my-index-000001` with a `user.id` of `kimchy`.\n", + "value": "{\n \"query\" : {\n \"term\" : { \"user.id\" : \"kimchy\" }\n }\n}" + } } } } @@ -108265,6 +110523,12 @@ "application/json": { "schema": { "type": "object" + }, + "examples": { + "CreateRequestExample1": { + "description": "Run `PUT my-index-000001/_create/1` to index a document into the `my-index-000001` index if no document with that ID exists.\n", + "value": "{\n \"@timestamp\": \"2099-11-15T13:12:00\",\n \"message\": \"GET /search HTTP/1.1 200 1070000\",\n \"user\": {\n \"id\": \"kimchy\"\n }\n}" + } } } }, @@ -108357,6 +110621,18 @@ "required": [ "query" ] + }, + "examples": { + "EqlSearchRequestExample1": { + "summary": "Basic query", + "description": "Run `GET /my-data-stream/_eql/search` to search for events that have a `process.name` of `cmd.exe` and a `process.pid` other than `2013`.\n", + "value": "{\n \"query\": \"\"\"\n process where (process.name == \"cmd.exe\" and process.pid != 2013)\n \"\"\"\n}" + }, + "EqlSearchRequestExample2": { + "summary": "Sequence query", + "description": "Run `GET /my-data-stream/_eql/search` to search for a sequence of events. The sequence starts with an event with an `event.category` of `file`, a `file.name` of `cmd.exe`, and a `process.pid` other than `2013`. It is followed by an event with an `event.category` of `process` and a `process.executable` that contains the substring `regsvr32`. These events must also share the same `process.pid` value.\n", + "value": "{\n \"query\": \"\"\"\n sequence by process.pid\n [ file where file.name == \"cmd.exe\" and process.pid != 2013 ]\n [ process where stringContains(process.executable, \"regsvr32\") ]\n \"\"\"\n}" + } } } }, @@ -108372,6 +110648,12 @@ "$ref": "#/components/schemas/_types.query_dsl:QueryContainer" } } + }, + "examples": { + "ExplainRequestExample1": { + "description": "Run `GET /my-index-000001/_explain/0` with the request body. 
Alternatively, run `GET /my-index-000001/_explain/0?q=message:elasticsearch`.\n", + "value": "{\n \"query\" : {\n \"match\" : { \"message\" : \"elasticsearch\" }\n }\n}" + } } } } @@ -108392,6 +110674,12 @@ "$ref": "#/components/schemas/_types.mapping:RuntimeFields" } } + }, + "examples": { + "FieldCapabilitiesRequestExample1": { + "description": "Run `POST my-index-*/_field_caps?fields=rating` to get field capabilities and filter indices with a query. Indices that rewrite the provided filter to `match_none` on every shard will be filtered from the response.\n", + "value": "{\n \"index_filter\": {\n \"range\": {\n \"@timestamp\": {\n \"gte\": \"2018\"\n }\n }\n }\n}" + } } } } @@ -108585,6 +110873,12 @@ } } } + }, + "examples": { + "GraphExploreRequestExample1": { + "description": "Run `POST clicklogs/_graph/explore` for a basic exploration. An initial graph explore query typically begins with a query to identify strongly related terms. Seed the exploration with a query; this example searches `clicklogs` for people who searched for the term `midi`. Identify the vertices to include in the graph; this example looks for product codes that are significantly associated with searches for `midi`. Find the connections; this example looks for other search terms that led people to click on the products that are associated with searches for `midi`.\n", + "value": "{\n \"query\": {\n \"match\": {\n \"query.raw\": \"midi\"\n }\n },\n \"vertices\": [\n {\n \"field\": \"product\"\n }\n ],\n \"connections\": {\n \"vertices\": [\n {\n \"field\": \"query.raw\"\n }\n ]\n }\n}" + } } } } @@ -108594,6 +110888,18 @@ "application/json": { "schema": { "type": "object" + }, + "examples": { + "IndexRequestExample1": { + "summary": "Automate document IDs", + "description": "Run `POST my-index-000001/_doc/` to index a document. When you use the `POST //_doc/` request format, the `op_type` is automatically set to `create` and the index operation generates a unique ID for the document.\n", + "value": "{\n \"@timestamp\": \"2099-11-15T13:12:00\",\n \"message\": \"GET /search HTTP/1.1 200 1070000\",\n \"user\": {\n \"id\": \"kimchy\"\n }\n}" + }, + "IndexRequestExample2": { + "summary": "Define document IDs", + "description": "Run `PUT my-index-000001/_doc/1` to insert a JSON document into the `my-index-000001` index with an `_id` of 1.\n", + "value": "{\n \"@timestamp\": \"2099-11-15T13:12:00\",\n \"message\": \"GET /search HTTP/1.1 200 1070000\",\n \"user\": {\n \"id\": \"kimchy\"\n }\n}" + } } } }, @@ -108648,6 +110954,43 @@ "$ref": "#/components/schemas/_types.analysis:Tokenizer" } } + }, + "examples": { + "indicesAnalyzeRequestExample1": { + "summary": "No index specified", + "description": "You can apply any of the built-in analyzers to the text string without specifying an index.", + "value": "{\n \"analyzer\": \"standard\",\n \"text\": \"this is a test\"\n}" + }, + "indicesAnalyzeRequestExample2": { + "summary": "An array of text strings", + "description": "If the text parameter is provided as an array of strings, it is analyzed as a multi-value field.", + "value": "{\n \"analyzer\": \"standard\",\n \"text\": [\n \"this is a test\",\n \"the second text\"\n ]\n}" + }, + "indicesAnalyzeRequestExample3": { + "summary": "Custom analyzer example 1", + "description": "You can test a custom transient analyzer built from tokenizers, token filters, and char filters. 
Token filters use the filter parameter.", + "value": "{\n \"tokenizer\": \"keyword\",\n \"filter\": [\n \"lowercase\"\n ],\n \"char_filter\": [\n \"html_strip\"\n ],\n \"text\": \"this is a test\"\n}" + }, + "indicesAnalyzeRequestExample4": { + "summary": "Custom analyzer example 2", + "description": "Custom tokenizers, token filters, and character filters can be specified in the request body.", + "value": "{\n \"tokenizer\": \"whitespace\",\n \"filter\": [\n \"lowercase\",\n {\n \"type\": \"stop\",\n \"stopwords\": [\n \"a\",\n \"is\",\n \"this\"\n ]\n }\n ],\n \"text\": \"this is a test\"\n}" + }, + "indicesAnalyzeRequestExample5": { + "summary": "Derive analyzer from field mapping", + "description": "Run `GET /analyze_sample/_analyze` to run an analysis on the text using the default index analyzer associated with the `analyze_sample` index. Alternatively, the analyzer can be derived based on a field mapping.", + "value": "{\n \"field\": \"obj1.field1\",\n \"text\": \"this is a test\"\n}" + }, + "indicesAnalyzeRequestExample6": { + "summary": "Normalizer", + "description": "Run `GET /analyze_sample/_analyze` and supply a normalizer for a keyword field if there is a normalizer associated with the specified index.", + "value": "{\n \"normalizer\": \"my_normalizer\",\n \"text\": \"BaR\"\n}" + }, + "indicesAnalyzeRequestExample7": { + "summary": "Explain analysis", + "description": "If you want to get more advanced details, set `explain` to `true`. It will output all token attributes for each token. You can filter token attributes you want to output by setting the `attributes` option. NOTE: The format of the additional detail information is labelled as experimental in Lucene and it may change in the future.\n", + "value": "{\n \"tokenizer\": \"standard\",\n \"filter\": [\n \"snowball\"\n ],\n \"text\": \"detailed output\",\n \"explain\": true,\n \"attributes\": [\n \"keyword\"\n ]\n}" + } } } } @@ -108673,6 +111016,13 @@ } } } + }, + "examples": { + "indicesCloneRequestExample1": { + "summary": "Clone an existing index.", + "description": "Clone `my_source_index` into a new index called `my_target_index` with `POST /my_source_index/_clone/my_target_index`. 
The API accepts `settings` and `aliases` parameters for the target index.\n", + "value": "{\n \"settings\": {\n \"index.number_of_shards\": 5\n },\n \"aliases\": {\n \"my_search_indices\": {}\n }\n}" + } } } } @@ -108710,6 +111060,11 @@ "$ref": "#/components/schemas/_types:Routing" } } + }, + "examples": { + "indicesPutAliasRequestExample1": { + "value": "{\n \"actions\": [\n {\n \"add\": {\n \"index\": \"my-data-stream\",\n \"alias\": \"my-alias\"\n }\n }\n ]\n}" + } } } } @@ -108762,6 +111117,17 @@ "type": "boolean" } } + }, + "examples": { + "IndicesPutIndexTemplateRequestExample1": { + "summary": "Create a template", + "value": "{\n \"index_patterns\" : [\"template*\"],\n \"priority\" : 1,\n \"template\": {\n \"settings\" : {\n \"number_of_shards\" : 2\n }\n }\n}" + }, + "IndicesPutIndexTemplateRequestExample2": { + "summary": "Create a template with aliases", + "description": "You can include index aliases in an index template.\nDuring index creation, the `{index}` placeholder in the alias name will be replaced with the actual index name that the template gets applied to.\n", + "value": "{\n \"index_patterns\": [\n \"template*\"\n ],\n \"template\": {\n \"settings\": {\n \"number_of_shards\": 1\n },\n \"aliases\": {\n \"alias1\": {},\n \"alias2\": {\n \"filter\": {\n \"term\": {\n \"user.id\": \"kimchy\"\n }\n },\n \"routing\": \"shard-1\"\n },\n \"{index}-alias\": {}\n }\n }\n}" + } } } }, @@ -108834,6 +111200,13 @@ "$ref": "#/components/schemas/_types.mapping:RuntimeFields" } } + }, + "examples": { + "indicesPutMappingRequestExample1": { + "summary": "Update multiple targets", + "description": "The update mapping API can be applied to multiple data streams or indices with a single request. For example, run `PUT /my-index-000001,my-index-000002/_mapping` to update mappings for the `my-index-000001` and `my-index-000002` indices at the same time.\n", + "value": "{\n \"properties\": {\n \"user\": {\n \"properties\": {\n \"name\": {\n \"type\": \"keyword\"\n }\n }\n }\n }\n}" + } } } }, @@ -108844,6 +111217,22 @@ "application/json": { "schema": { "$ref": "#/components/schemas/indices._types:IndexSettings" + }, + "examples": { + "IndicesPutSettingsRequestExample1": { + "summary": "Change a dynamic index setting", + "value": "{\n \"index\" : {\n \"number_of_replicas\" : 2\n }\n}" + }, + "indicesPutSettingsRequestExample2": { + "summary": "Reset an index setting", + "description": "To revert a setting to the default value, use `null`.", + "value": "{\n \"index\" : {\n \"refresh_interval\" : null\n }\n}" + }, + "indicesPutSettingsRequestExample3": { + "summary": "Update index analysis", + "description": "To add an analyzer, you must close the index, define the analyzer, then reopen the index.", + "value": "{\n \"analysis\" : {\n \"analyzer\":{\n \"content\":{\n \"type\":\"custom\",\n \"tokenizer\":\"whitespace\"\n }\n }\n }\n}" + } } } }, @@ -108890,6 +111279,17 @@ "$ref": "#/components/schemas/_types:VersionNumber" } } + }, + "examples": { + "indicesPutTemplateRequestExample1": { + "summary": "Create an index template", + "value": "{\n \"index_patterns\": [\n \"te*\",\n \"bar*\"\n ],\n \"settings\": {\n \"number_of_shards\": 1\n },\n \"mappings\": {\n \"_source\": {\n \"enabled\": false\n },\n \"properties\": {\n \"host_name\": {\n \"type\": \"keyword\"\n },\n \"created_at\": {\n \"type\": \"date\",\n \"format\": \"EEE MMM dd HH:mm:ss Z yyyy\"\n }\n }\n }\n}" + }, + "indicesPutTemplateRequestExample2": { + "summary": "Create an index template with aliases", + 
"description": "You can include index aliases in an index template. During index creation, the `{index}` placeholder in the alias name will be replaced with the actual index name that the template gets applied to.\n", + "value": "{\n \"index_patterns\": [\n \"te*\"\n ],\n \"settings\": {\n \"number_of_shards\": 1\n },\n \"aliases\": {\n \"alias1\": {},\n \"alias2\": {\n \"filter\": {\n \"term\": {\n \"user.id\": \"kimchy\"\n }\n },\n \"routing\": \"shard-1\"\n },\n \"{index}-alias\": {}\n }\n}" + } } } }, @@ -108922,6 +111322,12 @@ } } } + }, + "examples": { + "indicesRolloverRequestExample1": { + "summary": "Create a new index for a data stream.", + "value": "{\n \"conditions\": {\n \"max_age\": \"7d\",\n \"max_docs\": 1000,\n \"max_primary_shard_size\": \"50gb\",\n \"max_primary_shard_docs\": \"2000\"\n }\n}" + } } } } @@ -108947,6 +111353,12 @@ } } } + }, + "examples": { + "indicesShrinkRequestExample1": { + "summary": "Shrink an existing index into a new index with fewer primary shards.", + "value": "{\n \"settings\": {\n \"index.routing.allocation.require._name\": null,\n \"index.blocks.write\": null\n }\n}" + } } } } @@ -108999,6 +111411,12 @@ "type": "boolean" } } + }, + "examples": { + "indicesSimulateTemplateRequestExample1": { + "description": "To see what settings will be applied by a template before you add it to the cluster, you can pass a template configuration in the request body. The specified template is used for the simulation if it has a higher priority than existing templates.\n", + "value": "{\n \"index_patterns\": [\"my-index-*\"],\n \"composed_of\": [\"ct2\"],\n \"priority\": 10,\n \"template\": {\n \"settings\": {\n \"index.number_of_replicas\": 1\n }\n }\n}" + } } } } @@ -109024,6 +111442,12 @@ } } } + }, + "examples": { + "indicesSplitRequestExample1": { + "description": "Split an existing index into a new index with more primary shards.", + "value": "{\n \"settings\": {\n \"index.number_of_shards\": 2\n }\n}" + } } } } @@ -109073,6 +111497,28 @@ "required": [ "input" ] + }, + "examples": { + "InferenceRequestExample1": { + "summary": "Completion task", + "description": "Run `POST _inference/completion/openai_chat_completions` to perform a completion on the example question.", + "value": "{\n \"input\": \"What is Elastic?\"\n}" + }, + "InferenceRequestExample2": { + "summary": "Rerank task", + "description": "Run `POST _inference/rerank/cohere_rerank` to perform reranking on the example input.", + "value": "{\n \"input\": [\"luke\", \"like\", \"leia\", \"chewy\",\"r2d2\", \"star\", \"wars\"],\n \"query\": \"star wars main character\"\n}" + }, + "InferenceRequestExample3": { + "summary": "Sparse embedding task", + "description": "Run `POST _inference/sparse_embedding/my-elser-model` to perform sparse embedding on the example sentence.", + "value": "{\n \"input\": \"The sky above the port was the color of television tuned to a dead channel.\"\n}" + }, + "InferenceRequestExample4": { + "summary": "Text embedding task", + "description": "Run `POST _inference/text_embedding/my-cohere-endpoint` to perform text embedding on the example sentence using the Cohere integration,", + "value": "{\n \"input\": \"The sky above the port was the color of television tuned to a dead channel.\",\n \"task_settings\": {\n \"input_type\": \"ingest\"\n }\n}" + } } } } @@ -109111,6 +111557,13 @@ "required": [ "input" ] + }, + "examples": { + "StreamInferenceRequestExample1": { + "summary": "Perform a completion task", + "description": "Run `POST 
_inference/completion/openai-completion/_stream` to perform a completion on the example question with streaming.", + "value": "{\n \"input\": \"What is Elastic?\"\n}" + } } } } @@ -109199,6 +111652,13 @@ "required": [ "docs" ] + }, + "examples": { + "SimulatePipelineRequestExample1": { + "summary": "Run an ingest pipeline against a set of provided documents.", + "description": "You can specify the pipeline either in the request body or as a path parameter.", + "value": "{\n \"pipeline\" :\n {\n \"description\": \"_description\",\n \"processors\": [\n {\n \"set\" : {\n \"field\" : \"field2\",\n \"value\" : \"_value\"\n }\n }\n ]\n },\n \"docs\": [\n {\n \"_index\": \"index\",\n \"_id\": \"id\",\n \"_source\": {\n \"foo\": \"bar\"\n }\n },\n {\n \"_index\": \"index\",\n \"_id\": \"id\",\n \"_source\": {\n \"foo\": \"rab\"\n }\n }\n ]\n}" + } } } }, @@ -109268,6 +111728,12 @@ } } } + }, + "examples": { + "PostLicenseRequestExample1": { + "description": "Run `PUT _license` to update to a basic license. NOTE: These values are invalid; you must substitute the appropriate contents from your license file.\n", + "value": "{\n \"licenses\": [\n {\n \"uid\":\"893361dc-9749-4997-93cb-802e3d7fa4xx\",\n \"type\":\"basic\",\n \"issue_date_in_millis\":1411948800000,\n \"expiry_date_in_millis\":1914278399999,\n \"max_nodes\":1,\n \"issued_to\":\"issuedTo\",\n \"issuer\":\"issuer\",\n \"signature\":\"xx\"\n }\n ]\n}" + } } } } @@ -109289,6 +111755,28 @@ "$ref": "#/components/schemas/_types:Ids" } } + }, + "examples": { + "MultiGetRequestExample1": { + "summary": "Get documents by ID", + "description": "Run `GET /my-index-000001/_mget`. When you specify an index in the request URI, only the document IDs are required in the request body.\n", + "value": "{\n \"docs\": [\n {\n \"_id\": \"1\"\n },\n {\n \"_id\": \"2\"\n }\n ]\n}" + }, + "MultiGetRequestExample2": { + "summary": "Filter source fields", + "description": "Run `GET /_mget`. This request sets `_source` to `false` for document 1 to exclude the source entirely. It retrieves `field3` and `field4` from document 2. It retrieves the `user` field from document 3 but filters out the `user.location` field.\n", + "value": "{\n \"docs\": [\n {\n \"_index\": \"test\",\n \"_id\": \"1\",\n \"_source\": false\n },\n {\n \"_index\": \"test\",\n \"_id\": \"2\",\n \"_source\": [ \"field3\", \"field4\" ]\n },\n {\n \"_index\": \"test\",\n \"_id\": \"3\",\n \"_source\": {\n \"include\": [ \"user\" ],\n \"exclude\": [ \"user.location\" ]\n }\n }\n ]\n}" + }, + "MultiGetRequestExample3": { + "summary": "Get stored fields", + "description": "Run `GET /_mget`. This request retrieves `field1` and `field2` from document 1 and `field3` and `field4` from document 2.\n", + "value": "{\n \"docs\": [\n {\n \"_index\": \"test\",\n \"_id\": \"1\",\n \"stored_fields\": [ \"field1\", \"field2\" ]\n },\n {\n \"_index\": \"test\",\n \"_id\": \"2\",\n \"stored_fields\": [ \"field3\", \"field4\" ]\n }\n ]\n}" + }, + "MultiGetRequestExample4": { + "summary": "Document routing", + "description": "Run `GET /_mget?routing=key1`. If routing is used during indexing, you need to specify the routing value to retrieve documents. This request fetches `test/_doc/2` from the shard corresponding to routing key `key1`. 
It fetches `test/_doc/1` from the shard corresponding to routing key `key2`.\n", + "value": "{\n \"docs\": [\n {\n \"_index\": \"test\",\n \"_id\": \"1\",\n \"routing\": \"key2\"\n },\n {\n \"_index\": \"test\",\n \"_id\": \"2\"\n }\n ]\n}" + } } } }, @@ -109347,6 +111835,12 @@ "type": "boolean" } } + }, + "examples": { + "MlExplainDataFrameAnalyticsRequestExample1": { + "description": "Run `POST _ml/data_frame/analytics/_explain` to explain a data frame analytics job configuration.", + "value": "{\n \"source\": {\n \"index\": \"houses_sold_last_10_yrs\"\n },\n \"analysis\": {\n \"regression\": {\n \"dependent_variable\": \"price\"\n }\n }\n}" + } } } } @@ -109640,6 +112134,12 @@ "items": { "$ref": "#/components/schemas/_global.msearch_template:RequestItem" } + }, + "examples": { + "MultiSearchTemplateRequestExample1": { + "description": "Run `GET my-index/_msearch/template` to run multiple templated searches.", + "value": "{ }\n{ \"id\": \"my-search-template\", \"params\": { \"query_string\": \"hello world\", \"from\": 0, \"size\": 10 }}\n{ }\n{ \"id\": \"my-other-search-template\", \"params\": { \"query_type\": \"match_all\" }}" + } } } }, @@ -109666,6 +112166,23 @@ } } } + }, + "examples": { + "MultiTermVectorsRequestExample1": { + "summary": "Get multiple term vectors", + "description": "Run `POST /my-index-000001/_mtermvectors`. When you specify an index in the request URI, the index does not need to be specified for each document in the request body.\n", + "value": "{\n \"docs\": [\n {\n \"_id\": \"2\",\n \"fields\": [\n \"message\"\n ],\n \"term_statistics\": true\n },\n {\n \"_id\": \"1\"\n }\n ]\n}" + }, + "MultiTermVectorsRequestExample2": { + "summary": "Simplified syntax", + "description": "Run `POST /my-index-000001/_mtermvectors`. If all requested documents are in the same index and the parameters are the same, you can use a simplified syntax.\n", + "value": "{\n \"ids\": [ \"1\", \"2\" ],\n \"parameters\": {\n \"fields\": [\n \"message\"\n ],\n \"term_statistics\": true\n }\n}" + }, + "MultiTermVectorsRequestExample3": { + "summary": "Artificial documents", + "description": "Run `POST /_mtermvectors` to generate term vectors for artificial documents provided in the body of the request. 
The mapping used is determined by the specified `_index`.\n", + "value": "{\n \"docs\": [\n {\n \"_index\": \"my-index-000001\",\n \"doc\" : {\n \"message\" : \"test test test\"\n }\n },\n {\n \"_index\": \"my-index-000001\",\n \"doc\" : {\n \"message\" : \"Another test ...\"\n }\n }\n ]\n}" + } } } } @@ -109680,6 +112197,12 @@ "$ref": "#/components/schemas/_types:Password" } } + }, + "examples": { + "ReloadSecureSettingsRequestExample1": { + "description": "Run `POST _nodes/reload_secure_settings` to reload the keystore on nodes in the cluster.", + "value": "{\n \"secure_settings_password\": \"keystore-password\"\n}" + } } } } @@ -109697,6 +112220,18 @@ "required": [ "script" ] + }, + "examples": { + "PutScriptRequestExample1": { + "summary": "Create a search template", + "description": "Run `PUT _scripts/my-search-template` to create a search template.\n", + "value": "{\n \"script\": {\n \"lang\": \"mustache\",\n \"source\": {\n \"query\": {\n \"match\": {\n \"message\": \"{{query_string}}\"\n }\n },\n \"from\": \"{{from}}\",\n \"size\": \"{{size}}\"\n }\n }\n}" + }, + "PutScriptRequestExample2": { + "summary": "Create a stored script", + "description": "Run `PUT _scripts/my-stored-script` to create a stored script.\n", + "value": "{\n \"script\": {\n \"lang\": \"painless\",\n \"source\": \"Math.log(_score * 2) + params['my_modifier']\"\n }\n}" + } } } }, @@ -109751,6 +112286,12 @@ "type": "string" } } + }, + "examples": { + "RenderSearchTemplateRequestExample1": { + "description": "Run `POST _render/template`", + "value": "{\n \"id\": \"my-search-template\",\n \"params\": {\n \"query_string\": \"hello world\",\n \"from\": 20,\n \"size\": 10\n }\n}" + } } } } @@ -109779,6 +112320,12 @@ "type": "number" } } + }, + "examples": { + "RollupSearchRequestExample1": { + "description": "Search rolled up data stored in `sensor_rollup` with `GET /sensor_rollup/_rollup_search`", + "value": "{\n \"size\": 0,\n \"aggregations\": {\n \"max_temperature\": {\n \"max\": {\n \"field\": \"temperature\"\n }\n }\n }\n}" + } } } }, @@ -109800,6 +112347,23 @@ "$ref": "#/components/schemas/_types:Script" } } + }, + "examples": { + "ExecutePainlessScriptRequestExample1": { + "summary": "Test context", + "description": "Run `POST /_scripts/painless/_execute`. The `painless_test` context is the default context. It runs scripts without additional parameters. The only variable that is available is `params`, which can be used to access user defined values. The result of the script is always converted to a string.\n", + "value": "{\n \"script\": {\n \"source\": \"params.count / params.total\",\n \"params\": {\n \"count\": 100.0,\n \"total\": 1000.0\n }\n }\n}" + }, + "ExecutePainlessScriptRequestExample2": { + "summary": "Filter context", + "description": "Run `POST /_scripts/painless/_execute` with a `filter` context. It treats scripts as if they were run inside a script query. For testing purposes, a document must be provided so that it will be temporarily indexed in-memory and is accessible from the script. 
More precisely, the `_source`, stored fields, and doc values of such a document are available to the script being tested.\n", + "value": "{\n \"script\": {\n \"source\": \"doc['field'].value.length() <= params.max_length\",\n \"params\": {\n \"max_length\": 4\n }\n },\n \"context\": \"filter\",\n \"context_setup\": {\n \"index\": \"my-index-000001\",\n \"document\": {\n \"field\": \"four\"\n }\n }\n}" + }, + "ExecutePainlessScriptRequestExample3": { + "summary": "Score context", + "description": "Run `POST /_scripts/painless/_execute` with a `score` context. It treats scripts as if they were run inside a `script_score` function in a `function_score` query.\n", + "value": "{\n \"script\": {\n \"source\": \"doc['rank'].value / params.max_rank\",\n \"params\": {\n \"max_rank\": 5.0\n }\n },\n \"context\": \"score\",\n \"context_setup\": {\n \"index\": \"my-index-000001\",\n \"document\": {\n \"rank\": 4\n }\n }\n}" + } } } } @@ -109820,6 +112384,12 @@ "required": [ "scroll_id" ] + }, + "examples": { + "ScrollRequestExample1": { + "description": "Run `GET /_search/scroll` to get the next batch of results for a scrolling search.", + "value": "{\n \"scroll_id\" : \"DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==\"\n}" + } } } } @@ -110008,6 +112578,23 @@ } } } + }, + "examples": { + "SearchRequestExample1": { + "summary": "A simple term search", + "description": "Run `GET /my-index-000001/_search?from=40&size=20` to run a search.\n", + "value": "{\n \"query\": {\n \"term\": {\n \"user.id\": \"kimchy\"\n }\n }\n}" + }, + "SearchRequestExample2": { + "summary": "A point in time search", + "description": "Run `POST /_search` to run a point in time search. The `id` parameter tells Elasticsearch to run the request using contexts from this open point in time. The `keep_alive` parameter tells Elasticsearch how long it should extend the time to live of the point in time.\n", + "value": "{\n \"size\": 100, \n \"query\": {\n \"match\" : {\n \"title\" : \"elasticsearch\"\n }\n },\n \"pit\": {\n \"id\": \"46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==\", \n \"keep_alive\": \"1m\" \n }\n}" + }, + "SearchRequestExample3": { + "summary": "Search slicing", + "description": "When paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently. The result from running the first `GET /_search` request returns documents belonging to the first slice (`id: 0`). If you run a second request with `id` set to `1`, it returns documents in the second slice. 
Since the maximum number of slices is set to `2`, the union of the results is equivalent to the results of a point-in-time search without slicing.\n", + "value": "{\n \"slice\": {\n \"id\": 0, \n \"max\": 2 \n },\n \"query\": {\n \"match\": {\n \"message\": \"foo\"\n }\n },\n \"pit\": {\n \"id\": \"46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==\"\n }\n}" + } } } } @@ -110026,6 +112613,12 @@ } } } + }, + "examples": { + "SearchApplicationsSearchRequestExample1": { + "description": "Use `POST _application/search_application/my-app/_search` to run a search against a search application called `my-app` that uses a search template.", + "value": "{\n \"params\": {\n \"query_string\": \"my first query\",\n \"text_fields\": [\n {\"name\": \"title\", \"boost\": 5},\n {\"name\": \"description\", \"boost\": 1}\n ]\n }\n}" + } } } } @@ -110089,6 +112682,12 @@ "type": "boolean" } } + }, + "examples": { + "SearchMvtRequestExample1": { + "description": "Run `GET museums/_mvt/location/13/4207/2692` to search an index for `location` values that intersect the `13/4207/2692` vector tile.\n", + "value": "{\n \"grid_agg\": \"geotile\",\n \"grid_precision\": 2,\n \"fields\": [\n \"name\",\n \"price\"\n ],\n \"query\": {\n \"term\": {\n \"included\": true\n }\n },\n \"aggs\": {\n \"min_price\": {\n \"min\": {\n \"field\": \"price\"\n }\n },\n \"max_price\": {\n \"max\": {\n \"field\": \"price\"\n }\n },\n \"avg_price\": {\n \"avg\": {\n \"field\": \"price\"\n }\n }\n }\n}" + } } } } @@ -110122,6 +112721,12 @@ "type": "string" } } + }, + "examples": { + "SearchTemplateRequestExample1": { + "description": "Run `GET my-index/_search/template` to run a search with a search template.\n", + "value": "{\n \"id\": \"my-search-template\",\n \"params\": {\n \"query_string\": \"hello world\",\n \"from\": 0,\n \"size\": 10\n }\n}" + } } } }, @@ -110141,6 +112746,12 @@ "type": "string" } } + }, + "examples": { + "SecurityChangePasswordRequestExample1": { + "description": "Run `POST /_security/user/jacknich/_password` to update the password for the `jacknich` user.\n", + "value": "{\n \"password\" : \"new-test-password\"\n}" + } } } }, @@ -110172,6 +112783,12 @@ "$ref": "#/components/schemas/_types:Metadata" } } + }, + "examples": { + "SecurityCreateApiKeyRequestExample1": { + "description": "Run `POST /_security/api_key` to create an API key. If `expiration` is not provided, the API keys do not expire. 
If `role_descriptors` is not provided, the permissions of the authenticated user are applied.\n", + "value": "{\n \"name\": \"my-api-key\",\n \"expiration\": \"1d\", \n \"role_descriptors\": { \n \"role-a\": {\n \"cluster\": [\"all\"],\n \"indices\": [\n {\n \"names\": [\"index-a*\"],\n \"privileges\": [\"read\"]\n }\n ]\n },\n \"role-b\": {\n \"cluster\": [\"all\"],\n \"indices\": [\n {\n \"names\": [\"index-b*\"],\n \"privileges\": [\"all\"]\n }\n ]\n }\n },\n \"metadata\": {\n \"application\": \"my-application\",\n \"environment\": {\n \"level\": 1,\n \"trusted\": true,\n \"tags\": [\"dev\", \"staging\"]\n }\n }\n}" + } } } }, @@ -110203,6 +112820,12 @@ } } } + }, + "examples": { + "SecurityHasPrivilegesRequestExample1": { + "description": "Run `GET /_security/user/_has_privileges` to check whether the current user has a specific set of cluster, index, and application privileges.", + "value": "{\n \"cluster\": [ \"monitor\", \"manage\" ],\n \"index\" : [\n {\n \"names\": [ \"suppliers\", \"products\" ],\n \"privileges\": [ \"read\" ]\n },\n {\n \"names\": [ \"inventory\" ],\n \"privileges\" : [ \"read\", \"write\" ]\n }\n ],\n \"application\": [\n {\n \"application\": \"inventory_manager\",\n \"privileges\" : [ \"read\", \"data:write/inventory\" ],\n \"resources\" : [ \"product/1852563\" ]\n }\n ]\n}" + } } } }, @@ -110229,6 +112852,12 @@ "uids", "privileges" ] + }, + "examples": { + "RequestExample1": { + "description": "Run `POST /_security/profile/_has_privileges` to check whether the two users associated with the specified profiles have all the requested set of cluster, index, and application privileges.\n", + "value": "{\n \"uids\": [\n \"u_LQPnxDxEjIH0GOUoFkZr5Y57YUwSkL9Joiq-g4OCbPc_0\",\n \"u_rzRnxDgEHIH0GOUoFkZr5Y27YUwSk19Joiq=g4OCxxB_1\",\n \"u_does-not-exist_0\"\n ],\n \"privileges\": {\n \"cluster\": [ \"monitor\", \"create_snapshot\", \"manage_ml\" ],\n \"index\" : [\n {\n \"names\": [ \"suppliers\", \"products\" ],\n \"privileges\": [ \"create_doc\"]\n },\n {\n \"names\": [ \"inventory\" ],\n \"privileges\" : [ \"read\", \"write\" ]\n }\n ],\n \"application\": [\n {\n \"application\": \"inventory_manager\",\n \"privileges\" : [ \"read\", \"data:write/inventory\" ],\n \"resources\" : [ \"product/1852563\" ]\n }\n ]\n }\n}" + } } } }, @@ -110245,6 +112874,18 @@ "$ref": "#/components/schemas/security.put_privileges:Actions" } } + }, + "examples": { + "SecurityPutPrivilegesRequestExample1": { + "summary": "Add a privilege", + "description": "Run `PUT /_security/privilege` to add a single application privilege. The wildcard (`*`) means that this privilege grants access to all actions that start with `data:read/`. Elasticsearch does not assign any meaning to these actions. 
However, if the request includes an application privilege such as `data:read/users` or `data:read/settings`, the has privileges API respects the use of a wildcard and returns `true`.\n", + "value": "{\n \"myapp\": {\n \"read\": {\n \"actions\": [ \n \"data:read/*\" , \n \"action:login\" ],\n \"metadata\": { \n \"description\": \"Read access to myapp\"\n }\n }\n }\n}" + }, + "SecurityPutPrivilegesRequestExample2": { + "summary": "Add multiple privileges", + "description": "Run `PUT /_security/privilege` to add multiple application privileges.\n", + "value": "{\n \"app01\": {\n \"read\": {\n \"actions\": [ \"action:login\", \"data:read/*\" ]\n },\n \"write\": {\n \"actions\": [ \"action:login\", \"data:write/*\" ]\n }\n },\n \"app02\": {\n \"all\": {\n \"actions\": [ \"*\" ]\n }\n }\n}" + } } } }, @@ -110323,6 +112964,23 @@ } } } + }, + "examples": { + "SecurityPutRoleRequestExample1": { + "summary": "Role example 1", + "description": "Run `POST /_security/role/my_admin_role` to create a role.", + "value": "{\n \"description\": \"Grants full access to all management features within the cluster.\",\n \"cluster\": [\"all\"],\n \"indices\": [\n {\n \"names\": [ \"index1\", \"index2\" ],\n \"privileges\": [\"all\"],\n \"field_security\" : { // optional\n \"grant\" : [ \"title\", \"body\" ]\n },\n \"query\": \"{\\\"match\\\": {\\\"title\\\": \\\"foo\\\"}}\" // optional\n }\n ],\n \"applications\": [\n {\n \"application\": \"myapp\",\n \"privileges\": [ \"admin\", \"read\" ],\n \"resources\": [ \"*\" ]\n }\n ],\n \"run_as\": [ \"other_user\" ], // optional\n \"metadata\" : { // optional\n \"version\" : 1\n }\n}" + }, + "SecurityPutRoleRequestExample2": { + "summary": "Role example 2", + "description": "Run `POST /_security/role/cli_or_drivers_minimal` to configure a role that can run SQL in JDBC.", + "value": "{\n \"cluster\": [\"cluster:monitor/main\"],\n \"indices\": [\n {\n \"names\": [\"test\"],\n \"privileges\": [\"read\", \"indices:admin/get\"]\n }\n ]\n}" + }, + "SecurityPutRoleRequestExample3": { + "summary": "Role example 3", + "description": "Run `POST /_security/role/only_remote_access_role` to configure a role with remote indices and remote cluster privileges for a remote cluster.", + "value": "{\n \"remote_indices\": [\n {\n \"clusters\": [\"my_remote\"], \n \"names\": [\"logs*\"], \n \"privileges\": [\"read\", \"read_cross_cluster\", \"view_index_metadata\"] \n }\n ],\n \"remote_cluster\": [\n {\n \"clusters\": [\"my_remote\"], \n \"privileges\": [\"monitor_stats\"] \n }\n ]\n}" + } } } }, @@ -110365,6 +113023,53 @@ } } } + }, + "examples": { + "SecurityPutRoleMappingRequestExample1": { + "summary": "Roles for all users", + "description": "Run `POST /_security/role_mapping/mapping1` to assign the `user` role to all users.\n", + "value": "{\n \"roles\": [ \"user\"],\n \"enabled\": true, \n \"rules\": {\n \"field\" : { \"username\" : \"*\" }\n },\n \"metadata\" : { \n \"version\" : 1\n }\n}" + }, + "SecurityPutRoleMappingRequestExample2": { + "summary": "Roles for specific users", + "description": "Run `POST /_security/role_mapping/mapping2` to assign the \"user\" and \"admin\" roles to specific users.\n", + "value": "{\n \"roles\": [ \"user\", \"admin\" ],\n \"enabled\": true,\n \"rules\": {\n \"field\" : { \"username\" : [ \"esadmin01\", \"esadmin02\" ] }\n }\n}" + }, + "SecurityPutRoleMappingRequestExample3": { + "summary": "Roles for specific realms", + "description": "Run `POST /_security/role_mapping/mapping3` to match users who authenticated against a specific realm.\n", + 
"value": "{\n \"roles\": [ \"ldap-user\" ],\n \"enabled\": true,\n \"rules\": {\n \"field\" : { \"realm.name\" : \"ldap1\" }\n }\n}" + }, + "SecurityPutRoleMappingRequestExample4": { + "summary": "Roles for specific groups", + "description": "Run `POST /_security/role_mapping/mapping4` to match any user where either the username is `esadmin` or the user is in the `cn=admin,dc=example,dc=com group`. This example is useful when the group names in your identity management system (such as Active Directory, or a SAML Identity Provider) do not have a one-to-one correspondence with the names of roles in Elasticsearch. The role mapping is the means by which you link a group name with a role name.\n", + "value": "{\n \"roles\": [ \"superuser\" ],\n \"enabled\": true,\n \"rules\": {\n \"any\": [\n {\n \"field\": {\n \"username\": \"esadmin\"\n }\n },\n {\n \"field\": {\n \"groups\": \"cn=admins,dc=example,dc=com\"\n }\n }\n ]\n }\n}" + }, + "SecurityPutRoleMappingRequestExample5": { + "summary": "Roles for multiple groups", + "description": "Run `POST /_security/role_mapping/mapping5` to use an array syntax for the groups field when there are multiple groups. This pattern matches any of the groups (rather than all of the groups).\n", + "value": "{\n \"role_templates\": [\n {\n \"template\": { \"source\": \"{{#tojson}}groups{{/tojson}}\" }, \n \"format\" : \"json\" \n }\n ],\n \"rules\": {\n \"field\" : { \"realm.name\" : \"saml1\" }\n },\n \"enabled\": true\n}" + }, + "SecurityPutRoleMappingRequestExample6": { + "summary": "Templated roles for groups", + "description": "Run `POST /_security/role_mapping/mapping6` for rare cases when the names of your groups may be an exact match for the names of your Elasticsearch roles. This can be the case when your SAML Identity Provider includes its own \"group mapping\" feature and can be configured to release Elasticsearch role names in the user's SAML attributes. In these cases it is possible to use a template that treats the group names as role names.\nNOTE: This should only be done if you intend to define roles for all of the provided groups. Mapping a user to a large number of unnecessary or undefined roles is inefficient and can have a negative effect on system performance. If you only need to map a subset of the groups, you should do it by using explicit mappings.\nThe `tojson` mustache function is used to convert the list of group names into a valid JSON array. Because the template produces a JSON array, the `format` must be set to `json`.\n", + "value": "{\n \"role_templates\": [\n {\n \"template\": { \"source\": \"{{#tojson}}groups{{/tojson}}\" }, \n \"format\" : \"json\" \n }\n ],\n \"rules\": {\n \"field\" : { \"realm.name\" : \"saml1\" }\n },\n \"enabled\": true\n}" + }, + "SecurityPutRoleMappingRequestExample7": { + "summary": "Users in a LDAP sub-tree and realm", + "description": "Run `POST /_security/role_mapping/mapping7` to match users within a particular LDAP sub-tree in a specific realm.\n", + "value": "{\n \"roles\": [ \"ldap-example-user\" ],\n \"enabled\": true,\n \"rules\": {\n \"all\": [\n { \"field\" : { \"dn\" : \"*,ou=subtree,dc=example,dc=com\" } },\n { \"field\" : { \"realm.name\" : \"ldap1\" } }\n ]\n }\n}" + }, + "SecurityPutRoleMappingRequestExample8": { + "summary": "Complex roles", + "description": "Run `POST /_security/role_mapping/mapping8` to assign rules that are complex and include wildcard matching. 
For example, this mapping matches any user where all of these conditions are met: the Distinguished Name matches the pattern `*,ou=admin,dc=example,dc=com`, or the `username` is `es-admin`, or the `username` is `es-system`; the user is in the `cn=people,dc=example,dc=com` group; the user does not have a `terminated_date`.\n", + "value": "{\n \"roles\": [ \"superuser\" ],\n \"enabled\": true,\n \"rules\": {\n \"all\": [\n {\n \"any\": [\n {\n \"field\": {\n \"dn\": \"*,ou=admin,dc=example,dc=com\"\n }\n },\n {\n \"field\": {\n \"username\": [ \"es-admin\", \"es-system\" ]\n }\n }\n ]\n },\n {\n \"field\": {\n \"groups\": \"cn=people,dc=example,dc=com\"\n }\n },\n {\n \"except\": {\n \"field\": {\n \"metadata.terminated_date\": null\n }\n }\n }\n ]\n }\n}" + }, + "SecurityPutRoleMappingRequestExample9": { + "summary": "Templated roles", + "description": "Run `POST /_security/role_mapping/mapping9` to use templated roles to automatically map every user to their own custom role. In this example every user who authenticates using the `cloud-saml` realm will be automatically mapped to two roles: the `saml_user` role and a role that is their username prefixed with `_user_`. For example, the user `nwong` would be assigned the `saml_user` and `_user_nwong` roles.\n", + "value": "{\n \"rules\": { \"field\": { \"realm.name\": \"cloud-saml\" } },\n \"role_templates\": [\n { \"template\": { \"source\" : \"saml_user\" } }, \n { \"template\": { \"source\" : \"_user_{{username}}\" } }\n ],\n \"enabled\": true\n}" + } } } }, @@ -110428,6 +113133,12 @@ "type": "boolean" } } + }, + "examples": { + "SecurityPutUserRequestExample1": { + "description": "Run `POST /_security/user/jacknich` to add the `jacknich` user.", + "value": "{\n \"password\" : \"l0ng-r4nd0m-p@ssw0rd\",\n \"roles\" : [ \"admin\", \"other_role1\" ],\n \"full_name\" : \"Jack Nicholson\",\n \"email\" : \"jacknich@example.com\",\n \"metadata\" : {\n \"intelligence\" : 7\n }\n}" + } } } }, @@ -110464,6 +113175,23 @@ "$ref": "#/components/schemas/_types:SortResults" } } + }, + "examples": { + "QueryApiKeysRequestExample1": { + "summary": "Query API keys by ID", + "description": "Run `GET /_security/_query/api_key?with_limited_by=true` to retrieve an API key by ID.", + "value": "{\n \"query\": {\n \"ids\": {\n \"values\": [\n \"VuaCfGcBCdbkQm-e5aOx\"\n ]\n }\n }\n}" + }, + "QueryApiKeysRequestExample2": { + "summary": "Query API keys with pagination", + "description": "Run `GET /_security/_query/api_key`. Use a `bool` query to issue complex logical conditions and use `from`, `size`, and `sort` to help paginate the result. For example, the API key name must begin with `app1-key-` and must not be `app1-key-01`. It must be owned by a username with the wildcard pattern `org-*-user` and the `environment` metadata field must have a `production` value. The offset to begin the search result is the twentieth (zero-based index) API key. The page size of the response is 10 API keys. 
The result is first sorted by creation date in descending order, then by name in ascending order.\n", + "value": "{\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"prefix\": {\n \"name\": \"app1-key-\" \n }\n },\n {\n \"term\": {\n \"invalidated\": \"false\" \n }\n }\n ],\n \"must_not\": [\n {\n \"term\": {\n \"name\": \"app1-key-01\" \n }\n }\n ],\n \"filter\": [\n {\n \"wildcard\": {\n \"username\": \"org-*-user\" \n }\n },\n {\n \"term\": {\n \"metadata.environment\": \"production\" \n }\n }\n ]\n }\n },\n \"from\": 20, \n \"size\": 10, \n \"sort\": [ \n { \"creation\": { \"order\": \"desc\", \"format\": \"date_time\" } },\n \"name\"\n ]\n}" + }, + "QueryApiKeysRequestExample3": { + "summary": "Query API keys by name", + "description": "Run `GET /_security/_query/api_key` to retrieve the API key by name.", + "value": "{\n \"query\": {\n \"term\": {\n \"name\": {\n \"value\": \"application-key-1\"\n }\n }\n }\n}" + } } } } @@ -110492,6 +113220,18 @@ "$ref": "#/components/schemas/_types:SortResults" } } + }, + "examples": { + "QueryRolesRequestExample1": { + "summary": "Query roles by name", + "description": "Run `POST /_security/_query/role` to list all roles, sorted by the role name.", + "value": "{\n \"sort\": [\"name\"]\n}" + }, + "QueryRolesRequestExample2": { + "summary": "Query roles by description", + "description": "Run `POST /_security/_query/role` to query only the user access role, given its description. It returns only the best matching role because `size` is set to `1`.\n", + "value": "{\n \"query\": {\n \"match\": {\n \"description\": {\n \"query\": \"user access\"\n }\n }\n },\n \"size\": 1 \n}" + } } } } @@ -110520,6 +113260,18 @@ "$ref": "#/components/schemas/_types:SortResults" } } + }, + "examples": { + "SecurityQueryUserRequestExample1": { + "summary": "Query users by role prefix", + "description": "Run `POST /_security/_query/user?with_profile_uid=true` to get users that have roles that are prefixed with `other`. It will also include the user `profile_uid` in the response.\n", + "value": "{\n \"query\": {\n \"prefix\": {\n \"roles\": \"other\"\n }\n }\n}" + }, + "SecurityQueryUserRequestExample2": { + "summary": "Query users with multiple conditions", + "description": "Run `POST /_security/_query/user`. Use a `bool` query to issue complex logical conditions: The `email` must end with `example.com`. The user must be enabled. The result will be filtered to only contain users with at least one role that contains the substring `other`. The offset to begin the search result is the second (zero-based index) user. The page size of the response is two users. The result is sorted by `username` in descending order.\n", + "value": "{\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"wildcard\": {\n \"email\": \"*example.com\" \n }\n },\n {\n \"term\": {\n \"enabled\": true \n }\n }\n ],\n \"filter\": [\n {\n \"wildcard\": {\n \"roles\": \"*other*\" \n }\n }\n ]\n }\n },\n \"from\": 1, \n \"size\": 2, \n \"sort\": [\n { \"username\": { \"order\": \"desc\"} } \n ]\n}" + } } } } @@ -110556,6 +113308,12 @@ "$ref": "#/components/schemas/security.suggest_user_profiles:Hint" } } + }, + "examples": { + "RequestExample1": { + "description": "Run `POST /_security/profile/_suggest` to get suggestions for profile documents with name-related fields matching `jack`. It specifies both `uids` and `labels` hints for better relevance. 
The `labels` hint ranks profiles higher if their `direction` label matches either `north` or `east`.\n", + "value": "{\n \"name\": \"jack\", \n \"hint\": {\n \"uids\": [ \n \"u_8RKO7AKfEbSiIHZkZZ2LJy2MUSDPWDr3tMI_CkIGApU_0\",\n \"u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0\"\n ],\n \"labels\": {\n \"direction\": [\"north\", \"east\"] \n }\n }\n}" + } } } } @@ -110581,6 +113339,12 @@ } } } + }, + "examples": { + "RequestExample1": { + "description": "Run `POST /_security/profile/u_P_0BMHgaOK3p7k-PFWUCbw9dQ-UFjt01oWJ_Dp2PmPc_0/_data` to update a profile document for the `u_P_0BMHgaOK3p7k-PFWUCbw9dQ-UFjt01oWJ_Dp2PmPc_0` user profile.\n", + "value": "{\n \"labels\": {\n \"direction\": \"east\"\n },\n \"data\": {\n \"app1\": {\n \"theme\": \"default\"\n }\n }\n}" + } } } }, @@ -110627,6 +113391,28 @@ "required": [ "docs" ] + }, + "examples": { + "SimulateIngestRequestExample1": { + "summary": "Existing pipeline definitions", + "description": "In this example the index `my-index` has a default pipeline called `my-pipeline` and a final pipeline called `my-final-pipeline`. Since both documents are being ingested into `my-index`, both pipelines are run using the pipeline definitions that are already in the system.", + "value": "{\n \"docs\": [\n {\n \"_id\": 123,\n \"_index\": \"my-index\",\n \"_source\": {\n \"foo\": \"bar\"\n }\n },\n {\n \"_id\": 456,\n \"_index\": \"my-index\",\n \"_source\": {\n \"foo\": \"rab\"\n }\n }\n ]\n}" + }, + "SimulateIngestRequestExample2": { + "summary": "Pipeline substitutions", + "description": "In this example the index `my-index` has a default pipeline called `my-pipeline` and a final pipeline called `my-final-pipeline`. But a substitute definition of `my-pipeline` is provided in `pipeline_substitutions`. The substitute `my-pipeline` will be used in place of the `my-pipeline` that is in the system, and then the `my-final-pipeline` that is already defined in the system will run.", + "value": "{\n \"docs\": [\n {\n \"_index\": \"my-index\",\n \"_id\": 123,\n \"_source\": {\n \"foo\": \"bar\"\n }\n },\n {\n \"_index\": \"my-index\",\n \"_id\": 456,\n \"_source\": {\n \"foo\": \"rab\"\n }\n }\n ],\n \"pipeline_substitutions\": {\n \"my-pipeline\": {\n \"processors\": [\n {\n \"uppercase\": {\n \"field\": \"foo\"\n }\n }\n ]\n }\n }\n}" + }, + "SimulateIngestRequestExample3": { + "summary": "Component template substitutions", + "description": "In this example, imagine that the index `my-index` has a strict mapping with only the `foo` keyword field defined. Say that field mapping came from a component template named `my-mappings-template`. You want to test adding a new field, `bar`. So a substitute definition of `my-mappings-template` is provided in `component_template_substitutions`. 
The substitute `my-mappings-template` will be used in place of the existing mapping for `my-index` and in place of the `my-mappings-template` that is in the system.\n", + "value": "{\n \"docs\": [\n {\n \"_index\": \"my-index\",\n \"_id\": \"123\",\n \"_source\": {\n \"foo\": \"foo\"\n }\n },\n {\n \"_index\": \"my-index\",\n \"_id\": \"456\",\n \"_source\": {\n \"bar\": \"rab\"\n }\n }\n ],\n \"component_template_substitutions\": {\n \"my-mappings_template\": {\n \"template\": {\n \"mappings\": {\n \"dynamic\": \"strict\",\n \"properties\": {\n \"foo\": {\n \"type\": \"keyword\"\n },\n \"bar\": {\n \"type\": \"keyword\"\n }\n }\n }\n }\n }\n }\n}" + }, + "SimulateIngestRequestExample4": { + "summary": "Multiple substitutions", + "description": "The pipeline, component template, and index template substitutions replace the existing pipeline details for the duration of this request.", + "value": "{\n \"docs\": [\n {\n \"_id\": \"id\",\n \"_index\": \"my-index\",\n \"_source\": {\n \"foo\": \"bar\"\n }\n },\n {\n \"_id\": \"id\",\n \"_index\": \"my-index\",\n \"_source\": {\n \"foo\": \"rab\"\n }\n }\n ],\n \"pipeline_substitutions\": {\n \"my-pipeline\": {\n \"processors\": [\n {\n \"set\": {\n \"field\": \"field3\",\n \"value\": \"value3\"\n }\n }\n ]\n }\n },\n \"component_template_substitutions\": {\n \"my-component-template\": {\n \"template\": {\n \"mappings\": {\n \"dynamic\": true,\n \"properties\": {\n \"field3\": {\n \"type\": \"keyword\"\n }\n }\n },\n \"settings\": {\n \"index\": {\n \"default_pipeline\": \"my-pipeline\"\n }\n }\n }\n }\n },\n \"index_template_substitutions\": {\n \"my-index-template\": {\n \"index_patterns\": [\n \"my-index-*\"\n ],\n \"composed_of\": [\n \"component_template_1\",\n \"component_template_2\"\n ]\n }\n },\n \"mapping_addition\": {\n \"dynamic\": \"strict\",\n \"properties\": {\n \"foo\": {\n \"type\": \"keyword\"\n }\n }\n }\n}" + } } } }, @@ -110670,6 +113456,12 @@ "type": "boolean" } } + }, + "examples": { + "SnapshotCreateRequestExample1": { + "description": "Run `PUT /_snapshot/my_repository/snapshot_2?wait_for_completion=true` to take a snapshot of `index_1` and `index_2`.", + "value": "{\n \"indices\": \"index_1,index_2\",\n \"ignore_unavailable\": true,\n \"include_global_state\": false,\n \"metadata\": {\n \"taken_by\": \"user123\",\n \"taken_because\": \"backup before upgrading\"\n }\n}" + } } } } @@ -110679,6 +113471,38 @@ "application/json": { "schema": { "$ref": "#/components/schemas/snapshot._types:Repository" + }, + "examples": { + "SnapshotCreateRepositoryRequestExample1": { + "summary": "A shared file system repository", + "description": "Run `PUT /_snapshot/my_repository` to create or update a shared file system snapshot repository.", + "value": "{\n \"type\": \"fs\",\n \"settings\": {\n \"location\": \"my_backup_location\"\n }\n}" + }, + "SnapshotCreateRepositoryRequestExample2": { + "summary": "An Azure repository", + "description": "Run `PUT /_snapshot/my_repository` to create or update an Azure snapshot repository.", + "value": "{\n \"type\": \"azure\",\n \"settings\": {\n \"client\": \"secondary\"\n }\n}" + }, + "SnapshotCreateRepositoryRequestExample3": { + "summary": "A Google Cloud Storage repository", + "description": "Run `PUT /_snapshot/my_gcs_repository` to create or update a Google Cloud Storage snapshot repository.", + "value": "{\n \"type\": \"gcs\",\n \"settings\": {\n \"bucket\": \"my_other_bucket\",\n \"base_path\": \"dev\"\n }\n}" + }, + "SnapshotCreateRepositoryRequestExample4": { + "summary": "An S3 
repository", + "description": "Run `PUT /_snapshot/my_s3_repository` to create or update an AWS S3 snapshot repository.", + "value": "{\n \"type\": \"s3\",\n \"settings\": {\n \"bucket\": \"my-bucket\"\n }\n}" + }, + "SnapshotCreateRepositoryRequestExample5": { + "summary": "A source-only repository", + "description": "Run `PUT _snapshot/my_src_only_repository` to create or update a source-only snapshot repository.", + "value": "{\n \"type\": \"source\",\n \"settings\": {\n \"delegate_type\": \"fs\",\n \"location\": \"my_backup_repository\"\n }\n}" + }, + "SnapshotCreateRepositoryRequestExample6": { + "summary": "A read-only URL repository", + "description": "Run `PUT _snapshot/my_read_only_url_repository` to create or update a read-only URL snapshot repository.", + "value": "{\n \"type\": \"url\",\n \"settings\": {\n \"url\": \"file:/mount/backups/my_fs_backup_location\"\n }\n}" + } } } }, @@ -110761,6 +113585,12 @@ "$ref": "#/components/schemas/_types:Duration" } } + }, + "examples": { + "QuerySqlRequestExample1": { + "description": "Run `POST _sql?format=txt` to get results for an SQL search.", + "value": "{\n \"query\": \"SELECT * FROM library ORDER BY page_count DESC LIMIT 5\"\n}" + } } } }, @@ -110790,6 +113620,13 @@ "required": [ "query" ] + }, + "examples": { + "TranslateSqlRequestExample1": { + "summary": "sql/apis/sql-translate-api.asciidoc:12", + "description": "", + "value": "{\n \"query\": \"SELECT * FROM library ORDER BY page_count DESC\",\n \"fetch_size\": 10\n}" + } } } }, @@ -110830,6 +113667,12 @@ "required": [ "field" ] + }, + "examples": { + "TermsEnumRequestExample1": { + "description": "Run `POST stackoverflow/_terms_enum`.", + "value": "{\n \"field\" : \"tags\",\n \"string\" : \"kiba\"\n}" + } } } } @@ -110855,6 +113698,33 @@ } } } + }, + "examples": { + "TermVectorsRequestExample1": { + "summary": "Return stored term vectors", + "description": "Run `GET /my-index-000001/_termvectors/1` to return all information and statistics for field `text` in document 1.\n", + "value": "{\n \"fields\" : [\"text\"],\n \"offsets\" : true,\n \"payloads\" : true,\n \"positions\" : true,\n \"term_statistics\" : true,\n \"field_statistics\" : true\n}" + }, + "TermVectorsRequestExample2": { + "summary": "Per-field analyzer", + "description": "Run `GET /my-index-000001/_termvectors/1` to set per-field analyzers. A different analyzer than the one at the field may be provided by using the `per_field_analyzer` parameter.\n", + "value": "{\n \"doc\" : {\n \"fullname\" : \"John Doe\",\n \"text\" : \"test test test\"\n },\n \"fields\": [\"fullname\"],\n \"per_field_analyzer\" : {\n \"fullname\": \"keyword\"\n }\n}" + }, + "TermVectorsRequestExample3": { + "summary": "Terms filtering", + "description": "Run `GET /imdb/_termvectors` to filter the terms returned based on their tf-idf scores. It returns the three most \"interesting\" keywords from the artificial document having the given \"plot\" field value. 
Notice that the keyword \"Tony\" or any stop words are not part of the response, as their tf-idf must be too low.\n", + "value": "{\n \"doc\": {\n \"plot\": \"When wealthy industrialist Tony Stark is forced to build an armored suit after a life-threatening incident, he ultimately decides to use its technology to fight against evil.\"\n },\n \"term_statistics\": true,\n \"field_statistics\": true,\n \"positions\": false,\n \"offsets\": false,\n \"filter\": {\n \"max_num_terms\": 3,\n \"min_term_freq\": 1,\n \"min_doc_freq\": 1\n }\n}" + }, + "TermVectorsRequestExample4": { + "summary": "Generate term vectors on the fly", + "description": "Run `GET /my-index-000001/_termvectors/1`. Term vectors which are not explicitly stored in the index are automatically computed on the fly. This request returns all information and statistics for the fields in document 1, even though the terms haven't been explicitly stored in the index. Note that for the field text, the terms are not regenerated.\n", + "value": "{\n \"fields\" : [\"text\", \"some_field_without_term_vectors\"],\n \"offsets\" : true,\n \"positions\" : true,\n \"term_statistics\" : true,\n \"field_statistics\" : true\n}" + }, + "TermVectorsRequestExample5": { + "summary": "Artificial documents", + "description": "Run `GET /my-index-000001/_termvectors`. Term vectors can be generated for artificial documents, that is for documents not present in the index. If dynamic mapping is turned on (default), the document fields not in the original mapping will be dynamically created.\n", + "value": "{\n \"doc\" : {\n \"fullname\" : \"John Doe\",\n \"text\" : \"test test test\"\n }\n}" + } } } } @@ -110876,6 +113746,12 @@ "required": [ "messages" ] + }, + "examples": { + "FindMessageStructureRequestExample1": { + "description": "Run `POST _text_structure/find_message_structure` to analyze Elasticsearch log files.\n", + "value": "{\n \"messages\": [\n \"[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128\",\n \"[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-url]\",\n \"[2024-03-05T10:52:41,042][INFO ][o.e.p.PluginsService ] [laptop] loaded module [rest-root]\",\n \"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-core]\",\n \"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-redact]\",\n \"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [ingest-user-agent]\",\n \"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-monitoring]\",\n \"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-s3]\",\n \"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-analytics]\",\n \"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-ent-search]\",\n \"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-autoscaling]\",\n \"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-painless]]\",\n \"[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-expression]\",\n \"[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-eql]\",\n \"[2024-03-05T10:52:43,291][INFO ][o.e.e.NodeEnvironment ] [laptop] heap size [16gb], compressed ordinary object pointers [true]\",\n 
\"[2024-03-05T10:52:46,098][INFO ][o.e.x.s.Security ] [laptop] Security is enabled\",\n \"[2024-03-05T10:52:47,227][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] Profiling is enabled\",\n \"[2024-03-05T10:52:47,259][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] profiling index templates will not be installed or reinstalled\",\n \"[2024-03-05T10:52:47,755][INFO ][o.e.i.r.RecoverySettings ] [laptop] using rate limit [40mb] with [default=40mb, read=0b, write=0b, max=0b]\",\n \"[2024-03-05T10:52:47,787][INFO ][o.e.d.DiscoveryModule ] [laptop] using discovery type [multi-node] and seed hosts providers [settings]\",\n \"[2024-03-05T10:52:49,188][INFO ][o.e.n.Node ] [laptop] initialized\",\n \"[2024-03-05T10:52:49,199][INFO ][o.e.n.Node ] [laptop] starting ...\"\n ]\n}" + } } } }, @@ -110902,6 +113778,12 @@ "grok_pattern", "text" ] + }, + "examples": { + "TestGrokPatternRequestExample1": { + "description": "Run `GET _text_structure/test_grok_pattern` to test a Grok pattern.", + "value": "{\n \"grok_pattern\": \"Hello %{WORD:first_name} %{WORD:last_name}\",\n \"text\": [\n \"Hello John Doe\",\n \"this does not match\"\n ]\n}" + } } } }, @@ -110942,6 +113824,12 @@ "$ref": "#/components/schemas/transform._types:Latest" } } + }, + "examples": { + "PreviewTransformRequestExample1": { + "description": "Run `POST _transform/_preview` to preview a transform that uses the pivot method.", + "value": "{\n \"source\": {\n \"index\": \"kibana_sample_data_ecommerce\"\n },\n \"pivot\": {\n \"group_by\": {\n \"customer_id\": {\n \"terms\": {\n \"field\": \"customer_id\",\n \"missing_bucket\": true\n }\n }\n },\n \"aggregations\": {\n \"max_price\": {\n \"max\": {\n \"field\": \"taxful_total_price\"\n }\n }\n }\n }\n}" + } } } } @@ -110984,6 +113872,23 @@ "$ref": "#/components/schemas/watcher._types:Watch" } } + }, + "examples": { + "WatcherExecuteRequestExample1": { + "summary": "Run a watch", + "description": "Run `POST _watcher/watch/my_watch/_execute` to run a watch. The input defined in the watch is ignored and the `alternative_input` is used as the payload. The condition as defined by the watch is ignored and is assumed to evaluate to true. The `force_simulate` action forces the simulation of `my-action`. Forcing the simulation means that throttling is ignored and the watch is simulated by Watcher instead of being run normally.\n", + "value": "{\n \"trigger_data\" : { \n \"triggered_time\" : \"now\",\n \"scheduled_time\" : \"now\"\n },\n \"alternative_input\" : { \n \"foo\" : \"bar\"\n },\n \"ignore_condition\" : true, \n \"action_modes\" : {\n \"my-action\" : \"force_simulate\" \n },\n \"record_execution\" : true \n}" + }, + "WatcherExecuteRequestExample2": { + "summary": "Run a watch with multiple action modes", + "description": "Run `POST _watcher/watch/my_watch/_execute` and set a different mode for each action.\n", + "value": "{\n \"action_modes\" : {\n \"action1\" : \"force_simulate\",\n \"action2\" : \"skip\"\n }\n}" + }, + "WatcherExecuteRequestExample3": { + "summary": "Run a watch inline", + "description": "Run `POST _watcher/watch/_execute` to run a watch inline. All other settings for this API still apply when inlining a watch. 
In this example, while the inline watch defines a compare condition, during the execution this condition will be ignored.\n", + "value": "{\n \"watch\" : {\n \"trigger\" : { \"schedule\" : { \"interval\" : \"10s\" } },\n \"input\" : {\n \"search\" : {\n \"request\" : {\n \"indices\" : [ \"logs\" ],\n \"body\" : {\n \"query\" : {\n \"match\" : { \"message\": \"error\" }\n }\n }\n }\n }\n },\n \"condition\" : {\n \"compare\" : { \"ctx.payload.hits.total\" : { \"gt\" : 0 }}\n },\n \"actions\" : {\n \"log_error\" : {\n \"logging\" : {\n \"text\" : \"Found {{ctx.payload.hits.total}} errors in the logs\"\n }\n }\n }\n }\n}" + } } } } @@ -111023,6 +113928,12 @@ "$ref": "#/components/schemas/watcher._types:TriggerContainer" } } + }, + "examples": { + "WatcherPutWatchRequestExample1": { + "description": "Run `PUT _watcher/watch/my-watch` to add a watch. The watch schedule triggers every minute. The watch search input looks for any 404 HTTP responses that occurred in the last five minutes. The watch condition checks if any search hits were found. When found, the watch action sends an email to an administrator.\n", + "value": "{\n \"trigger\" : {\n \"schedule\" : { \"cron\" : \"0 0/1 * * * ?\" }\n },\n \"input\" : {\n \"search\" : {\n \"request\" : {\n \"indices\" : [\n \"logstash*\"\n ],\n \"body\" : {\n \"query\" : {\n \"bool\" : {\n \"must\" : {\n \"match\": {\n \"response\": 404\n }\n },\n \"filter\" : {\n \"range\": {\n \"@timestamp\": {\n \"from\": \"{{ctx.trigger.scheduled_time}}||-5m\",\n \"to\": \"{{ctx.trigger.triggered_time}}\"\n }\n }\n }\n }\n }\n }\n }\n }\n },\n \"condition\" : {\n \"compare\" : { \"ctx.payload.hits.total\" : { \"gt\" : 0 }}\n },\n \"actions\" : {\n \"email_admin\" : {\n \"email\" : {\n \"to\" : \"admin@domain.host.com\",\n \"subject\" : \"404 recently encountered\"\n }\n }\n }\n}" + } } } } diff --git a/output/openapi/elasticsearch-serverless-openapi.json b/output/openapi/elasticsearch-serverless-openapi.json index 9db4a6b641..748ddec429 100644 --- a/output/openapi/elasticsearch-serverless-openapi.json +++ b/output/openapi/elasticsearch-serverless-openapi.json @@ -67,6 +67,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/async_search._types:AsyncSearchDocumentResponseBase" + }, + "examples": { + "AsyncSearchGetResponseExample1": { + "description": "A successful response from `GET /_async_search/FmRldE8zREVEUzA2ZVpUeGs2ejJFUFEaMkZ5QTVrSTZSaVN3WlNFVmtlWHJsdzoxMDc=`.", + "value": "{\n \"id\" : \"FmRldE8zREVEUzA2ZVpUeGs2ejJFUFEaMkZ5QTVrSTZSaVN3WlNFVmtlWHJsdzoxMDc=\",\n \"is_partial\" : false, \n \"is_running\" : false, \n \"start_time_in_millis\" : 1583945890986,\n \"expiration_time_in_millis\" : 1584377890986, \n \"completion_time_in_millis\" : 1583945903130, \n \"response\" : {\n \"took\" : 12144,\n \"timed_out\" : false,\n \"num_reduce_phases\" : 46, \n \"_shards\" : {\n \"total\" : 562,\n \"successful\" : 188, \n \"skipped\" : 0,\n \"failed\" : 0\n },\n \"hits\" : {\n \"total\" : {\n \"value\" : 456433,\n \"relation\" : \"eq\"\n },\n \"max_score\" : null,\n \"hits\" : [ ]\n },\n \"aggregations\" : { \n \"sale_date\" : {\n \"buckets\" : []\n }\n }\n }\n}" + } } } } @@ -147,6 +153,23 @@ "application/json": { "schema": { "$ref": "#/components/schemas/async_search.status:StatusResponseBase" + }, + "examples": { + "AsyncSearchStatusResponseExample1": { + "summary": "An active async search", + "description": "A successful response from `GET /_async_search/status/FmRldE8zREVEUzA2ZVpUeGs2ejJFUFEaMkZ5QTVrSTZSaVN3WlNFVmtlWHJsdzoxMDc=`, which retrieves the 
status of a previously submitted async search without the results.", + "value": "{\n \"id\" : \"FmRldE8zREVEUzA2ZVpUeGs2ejJFUFEaMkZ5QTVrSTZSaVN3WlNFVmtlWHJsdzoxMDc=\",\n \"is_running\" : true,\n \"is_partial\" : true,\n \"start_time_in_millis\" : 1583945890986,\n \"expiration_time_in_millis\" : 1584377890986,\n \"_shards\" : {\n \"total\" : 562,\n \"successful\" : 188, \n \"skipped\" : 0,\n \"failed\" : 0\n }\n}" + }, + "AsyncSearchStatusResponseExample2": { + "summary": "A completed async search", + "description": "A successful response from `GET /_async_search/status/FmRldE8zREVEUzA2ZVpUeGs2ejJFUFEaMkZ5QTVrSTZSaVN3WlNFVmtlWHJsdzoxMDc=` for an async search that has completed. The status response has an additional `completion_status` field that shows the status code of the completed async search.\n", + "value": "{\n \"id\" : \"FmRldE8zREVEUzA2ZVpUeGs2ejJFUFEaMkZ5QTVrSTZSaVN3WlNFVmtlWHJsdzoxMDc=\",\n \"is_running\" : false,\n \"is_partial\" : false,\n \"start_time_in_millis\" : 1583945890986,\n \"expiration_time_in_millis\" : 1584377890986,\n \"_shards\" : {\n \"total\" : 562,\n \"successful\" : 562,\n \"skipped\" : 0,\n \"failed\" : 0\n },\n\"completion_status\" : 200 \n}" + }, + "AsyncSearchStatusResponseExample3": { + "summary": "A failed async search", + "description": "A response from `GET /_async_search/status/FmRldE8zREVEUzA2ZVpUeGs2ejJFUFEaMkZ5QTVrSTZSaVN3WlNFVmtlWHJsdzoxMDc=` for an async search that has completed with an error. The status response has an additional `completion_status` field that shows the status code of the completed async search.\n", + "value": "{\n \"id\" : \"FmRldE8zREVEUzA2ZVpUeGs2ejJFUFEaMkZ5QTVrSTZSaVN3WlNFVmtlWHJsdzoxMDc=\",\n \"is_running\" : false,\n \"is_partial\" : true,\n \"start_time_in_millis\" : 1583945890986,\n \"expiration_time_in_millis\" : 1584377890986,\n \"_shards\" : {\n \"total\" : 562,\n \"successful\" : 450,\n \"skipped\" : 0,\n \"failed\" : 112\n },\n\"completion_status\" : 503 \n}" + } } } } @@ -1471,6 +1494,12 @@ "required": [ "id" ] + }, + "examples": { + "ClosePointInTimeRequestExample1": { + "description": "Run `DELETE /_pit` to close a point-in-time.", + "value": "{\n \"id\": \"46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==\"\n}" + } } } } @@ -1822,6 +1851,11 @@ "required": [ "result" ] + }, + "examples": { + "ConnectorCheckInResponseExample1": { + "value": "{\n \"result\": \"updated\"\n}" + } } } } @@ -1943,6 +1977,11 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "ConnectorDeleteResponseExample1": { + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -2244,6 +2283,11 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "SyncJobDeleteResponseExample1": { + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -2377,6 +2421,11 @@ "required": [ "id" ] + }, + "examples": { + "SyncJobPostRequestExample1": { + "value": "{\n \"id\": \"connector-id\",\n \"job_type\": \"full\",\n \"trigger_method\": \"on_demand\"\n}" + } } } }, @@ -2483,6 +2532,11 @@ "type": "string" } } + }, + "examples": { + "ConnectorUpdateApiKeyIDRequestExample1": { + "value": "{\n \"api_key_id\": \"my-api-key-id\",\n \"api_key_secret_id\": \"my-connector-secret-id\"\n}" + } } } }, @@ -2503,6 +2557,11 @@ "required": [ "result" ] + }, + "examples": { + "ConnectorUpdateAPIKeyIDResponseExample1": { 
"value": "{\n \"result\": \"updated\"\n}" + } } } } @@ -2548,6 +2607,14 @@ } } } + }, + "examples": { + "ConnectorUpdateConfigurationRequestExample1": { + "value": "{\n \"values\": {\n \"tenant_id\": \"my-tenant-id\",\n \"tenant_name\": \"my-sharepoint-site\",\n \"client_id\": \"foo\",\n \"secret_value\": \"bar\",\n \"site_collections\": \"*\"\n }\n}" + }, + "ConnectorUpdateConfigurationRequestExample2": { + "value": "{\n \"values\": {\n \"secret_value\": \"foo-bar\"\n }\n}" + } } } }, @@ -2568,6 +2635,11 @@ "required": [ "result" ] + }, + "examples": { + "ConnectorUpdateConfigurationResponseExample1": { + "value": "{\n \"result\": \"updated\"\n}" + } } } } @@ -2617,6 +2689,11 @@ "required": [ "error" ] + }, + "examples": { + "ConnectorUpdateErrorRequestExample1": { + "value": "{\n \"error\": \"Houston, we have a problem!\"\n}" + } } } }, @@ -2637,6 +2714,11 @@ "required": [ "result" ] + }, + "examples": { + "ConnectorUpdateErrorResponseExample1": { + "value": "{\n \"result\": \"updated\"\n}" + } } } } @@ -2688,6 +2770,14 @@ "$ref": "#/components/schemas/connector._types:FilteringAdvancedSnippet" } } + }, + "examples": { + "ConnectorUpdateFilteringRequestExample1": { + "value": "{\n \"rules\": [\n {\n \"field\": \"file_extension\",\n \"id\": \"exclude-txt-files\",\n \"order\": 0,\n \"policy\": \"exclude\",\n \"rule\": \"equals\",\n \"value\": \"txt\"\n },\n {\n \"field\": \"_\",\n \"id\": \"DEFAULT\",\n \"order\": 1,\n \"policy\": \"include\",\n \"rule\": \"regex\",\n \"value\": \".*\"\n }\n ]\n}" + }, + "ConnectorUpdateFilteringRequestExample2": { + "value": "{\n \"advanced_snippet\": {\n \"value\": [{\n \"tables\": [\n \"users\",\n \"orders\"\n ],\n \"query\": \"SELECT users.id AS id, orders.order_id AS order_id FROM users JOIN orders ON users.id = orders.user_id\"\n }]\n }\n}" + } } } }, @@ -2708,6 +2798,11 @@ "required": [ "result" ] + }, + "examples": { + "ConnectorUpdateFilteringResponseExample1": { + "value": "{\n \"result\": \"updated\"\n}" + } } } } @@ -2819,6 +2914,11 @@ "required": [ "index_name" ] + }, + "examples": { + "ConnectorUpdateIndexNameRequestExample1": { + "value": "{\n \"index_name\": \"data-from-my-google-drive\"\n}" + } } } }, @@ -2839,6 +2939,11 @@ "required": [ "result" ] + }, + "examples": { + "ConnectorUpdateIndexNameResponseExample1": { + "value": "{\n \"result\": \"updated\"\n}" + } } } } @@ -2880,6 +2985,11 @@ "type": "string" } } + }, + "examples": { + "ConnectorUpdateNameRequestExample1": { + "value": "{\n \"name\": \"Custom connector\",\n \"description\": \"This is my customized connector\"\n}" + } } } }, @@ -2900,6 +3010,11 @@ "required": [ "result" ] + }, + "examples": { + "ConnectorUpdateNameResponseExample1": { + "value": "{\n \"result\": \"updated\"\n}" + } } } } @@ -3003,6 +3118,11 @@ "required": [ "pipeline" ] + }, + "examples": { + "ConnectorUpdatePipelineRequestExample1": { + "value": "{\n \"pipeline\": {\n \"extract_binary_content\": true,\n \"name\": \"my-connector-pipeline\",\n \"reduce_whitespace\": true,\n \"run_ml_inference\": true\n }\n}" + } } } }, @@ -3023,6 +3143,11 @@ "required": [ "result" ] + }, + "examples": { + "ConnectorUpdatePipelineResponseExample1": { + "value": "{\n \"result\": \"updated\"\n}" + } } } } @@ -3064,6 +3189,14 @@ "required": [ "scheduling" ] + }, + "examples": { + "ConnectorUpdateSchedulingRequestExample1": { + "value": "{\n \"scheduling\": {\n \"access_control\": {\n \"enabled\": true,\n \"interval\": \"0 10 0 * * ?\"\n },\n \"full\": {\n \"enabled\": true,\n \"interval\": \"0 20 0 * * ?\"\n },\n \"incremental\": 
{\n \"enabled\": false,\n \"interval\": \"0 30 0 * * ?\"\n }\n }\n}" + }, + "ConnectorUpdateSchedulingRequestExample2": { + "value": "{\n \"scheduling\": {\n \"full\": {\n \"enabled\": true,\n \"interval\": \"0 10 0 * * ?\"\n }\n }\n}" + } } } }, @@ -3084,6 +3217,11 @@ "required": [ "result" ] + }, + "examples": { + "ConnectorUpdateSchedulingResponseExample1": { + "value": "{\n \"result\": \"updated\"\n}" + } } } } @@ -3125,6 +3263,11 @@ "required": [ "service_type" ] + }, + "examples": { + "ConnectorUpdateServiceTypeRequestExample1": { + "value": "{\n \"service_type\": \"sharepoint_online\"\n}" + } } } }, @@ -3145,6 +3288,11 @@ "required": [ "result" ] + }, + "examples": { + "ConnectorUpdateServiceTypeResponseExample1": { + "value": "{\n \"result\": \"updated\"\n}" + } } } } @@ -3186,6 +3334,11 @@ "required": [ "status" ] + }, + "examples": { + "ConnectorUpdateStatusRequestExample1": { + "value": "{\n \"status\": \"needs_configuration\"\n}" + } } } }, @@ -3206,6 +3359,11 @@ "required": [ "result" ] + }, + "examples": { + "ConnectorUpdateStatusResponseExample1": { + "value": "{\n \"result\": \"updated\"\n}" + } } } } @@ -3709,6 +3867,23 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_global.get:GetResult" + }, + "examples": { + "GetResponseExample1": { + "summary": "Get a document", + "description": "A successful response from `GET my-index-000001/_doc/0`. It retrieves the JSON document with the `_id` 0 from the `my-index-000001` index.\n", + "value": "{\n \"_index\": \"my-index-000001\",\n \"_id\": \"0\",\n \"_version\": 1,\n \"_seq_no\": 0,\n \"_primary_term\": 1,\n \"found\": true,\n \"_source\": {\n \"@timestamp\": \"2099-11-15T14:12:12\",\n \"http\": {\n \"request\": {\n \"method\": \"get\"\n },\n \"response\": {\n \"status_code\": 200,\n \"bytes\": 1070000\n },\n \"version\": \"1.1\"\n },\n \"source\": {\n \"ip\": \"127.0.0.1\"\n },\n \"message\": \"GET /search HTTP/1.1 200 1070000\",\n \"user\": {\n \"id\": \"kimchy\"\n }\n }\n}" + }, + "GetResponseExample2": { + "summary": "Get stored fields", + "description": "A successful response from `GET my-index-000001/_doc/1?stored_fields=tags,counter`, which retrieves a set of stored fields. Field values fetched from the document itself are always returned as an array. 
Any requested fields that are not stored (such as the counter field in this example) are ignored.\n", + "value": "{\n \"_index\": \"my-index-000001\",\n \"_id\": \"1\",\n \"_version\": 1,\n \"_seq_no\" : 22,\n \"_primary_term\" : 1,\n \"found\": true,\n \"fields\": {\n \"tags\": [\n \"production\"\n ]\n }\n}" + }, + "GetResponseExample3": { + "summary": "Get metadata fields", + "description": "A successful response from `GET my-index-000001/_doc/2?routing=user1&stored_fields=tags,counter`, which retrieves the `_routing` metadata field.\n", + "value": "{\n \"_index\": \"my-index-000001\",\n \"_id\": \"2\",\n \"_version\": 1,\n \"_seq_no\" : 13,\n \"_primary_term\" : 1,\n \"_routing\": \"user1\",\n \"found\": true,\n \"fields\": {\n \"tags\": [\n \"env2\"\n ]\n }\n}" + } } } } @@ -3959,6 +4134,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:WriteResponseBase" + }, + "examples": { + "DeleteResponseExample1": { + "description": "A successful response from `DELETE /my-index-000001/_doc/1`, which deletes the JSON document 1 from the `my-index-000001` index.", + "value": "{\n \"_shards\": {\n \"total\": 2,\n \"failed\": 0,\n \"successful\": 2\n },\n \"_index\": \"my-index-000001\",\n \"_id\": \"1\",\n \"_version\": 2,\n \"_primary_term\": 1,\n \"_seq_no\": 5,\n \"result\": \"deleted\"\n}" + } } } } @@ -4440,6 +4621,28 @@ "$ref": "#/components/schemas/_types:SlicedScroll" } } + }, + "examples": { + "DeleteByQueryRequestExample1": { + "summary": "Delete all documents", + "description": "Run `POST /my-index-000001,my-index-000002/_delete_by_query` to delete all documents from multiple data streams or indices.", + "value": "{\n \"query\": {\n \"match_all\": {}\n }\n}" + }, + "DeleteByQueryRequestExample2": { + "summary": "Delete a single document", + "description": "Run `POST my-index-000001/_delete_by_query` to delete a document by using a unique attribute.", + "value": "{\n \"query\": {\n \"term\": {\n \"user.id\": \"kimchy\"\n }\n },\n \"max_docs\": 1\n}" + }, + "DeleteByQueryRequestExample3": { + "summary": "Slice manually", + "description": "Run `POST my-index-000001/_delete_by_query` to slice a delete by query manually. Provide a slice ID and total number of slices.\n", + "value": "{\n \"slice\": {\n \"id\": 0,\n \"max\": 2\n },\n \"query\": {\n \"range\": {\n \"http.response.bytes\": {\n \"lt\": 2000000\n }\n }\n }\n}" + }, + "DeleteByQueryRequestExample4": { + "summary": "Automatic slicing", + "description": "Run `POST my-index-000001/_delete_by_query?refresh&slices=5` to let delete by query automatically parallelize using sliced scroll to slice on `_id`. 
The `slices` query parameter value specifies the number of slices to use.\n", + "value": "{\n \"query\": {\n \"range\": {\n \"http.response.bytes\": {\n \"lt\": 2000000\n }\n }\n }\n}" + } } } }, @@ -4513,6 +4716,12 @@ "type": "number" } } + }, + "examples": { + "DeleteByQueryResponseExample1": { + "description": "A successful response from `POST /my-index-000001/_delete_by_query`.", + "value": "{\n \"took\" : 147,\n \"timed_out\": false,\n \"total\": 119,\n \"deleted\": 119,\n \"batches\": 1,\n \"version_conflicts\": 0,\n \"noops\": 0,\n \"retries\": {\n \"bulk\": 0,\n \"search\": 0\n },\n \"throttled_millis\": 0,\n \"requests_per_second\": -1.0,\n \"throttled_until_millis\": 0,\n \"failures\" : [ ]\n}" + } } } } @@ -5064,6 +5273,12 @@ "is_partial", "is_running" ] + }, + "examples": { + "EqlGetStatusResponseExample1": { + "description": "A successful response for getting status information for an async EQL search.", + "value": "{\n \"id\": \"FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=\",\n \"is_running\" : true,\n \"is_partial\" : true,\n \"start_time_in_millis\" : 1611690235000,\n \"expiration_time_in_millis\" : 1611690295000\n}" + } } } } @@ -5260,6 +5475,12 @@ "required": [ "query" ] + }, + "examples": { + "QueryRequestExample1": { + "description": "Run `POST /_query` to get results for an ES|QL query.", + "value": "{\n \"query\": \"\"\"\n FROM library\n | EVAL year = DATE_TRUNC(1 YEARS, release_date)\n | STATS MAX(page_count) BY year\n | SORT year\n | LIMIT 5\n \"\"\"\n}" + } } } }, @@ -6098,6 +6319,12 @@ "shards_acknowledged", "indices" ] + }, + "examples": { + "IndicesAddBlockResponseExample1": { + "description": "A successful response from `PUT /my-index-000001/_block/write`, which adds an index block to an index.", + "value": "{\n \"acknowledged\" : true,\n \"shards_acknowledged\" : true,\n \"indices\" : [ {\n \"name\" : \"my-index-000001\",\n \"blocked\" : true\n } ]\n}" + } } } } @@ -6383,6 +6610,23 @@ "$ref": "#/components/schemas/indices._types:IndexSettings" } } + }, + "examples": { + "indicesCreateRequestExample1": { + "summary": "Create an index.", + "description": "This request specifies the `number_of_shards` and `number_of_replicas`.", + "value": "{\n \"settings\": {\n \"number_of_shards\": 3,\n \"number_of_replicas\": 2\n }\n}" + }, + "indicesCreateRequestExample2": { + "summary": "Create an index with mappings.", + "description": "You can provide mapping definitions in the create index API requests.", + "value": "{\n \"settings\": {\n \"number_of_shards\": 1\n },\n \"mappings\": {\n \"properties\": {\n \"field1\": { \"type\": \"text\" }\n }\n }\n}" + }, + "indicesCreateRequestExample3": { + "summary": "Create an index with aliases.", + "description": "You can provide alias definitions in the create index API requests. Index alias names also support date math.\n", + "value": "{\n \"aliases\": {\n \"alias_1\": {},\n \"alias_2\": {\n \"filter\": {\n \"term\": {\n \"user.id\": \"kimchy\"\n }\n },\n \"routing\": \"shard-1\"\n }\n }\n}" + } } } } @@ -7283,6 +7527,18 @@ "required": [ "indices" ] + }, + "examples": { + "IndicesExplainDataLifecycleResponseExample1": { + "summary": "Successful response", + "description": "A successful response from `GET .ds-metrics-2023.03.22-000001/_lifecycle/explain`, which retrieves the lifecycle status for a data stream backing index. 
If the index is managed by a data stream lifecycle, the API will show the `managed_by_lifecycle` field set to `true` and the rest of the response will contain information about the lifecycle execution status for this index.\n", + "value": "{\n \"indices\": {\n \".ds-metrics-2023.03.22-000001\": {\n \"index\" : \".ds-metrics-2023.03.22-000001\",\n \"managed_by_lifecycle\" : true,\n \"index_creation_date_millis\" : 1679475563571,\n \"time_since_index_creation\" : \"843ms\",\n \"rollover_date_millis\" : 1679475564293,\n \"time_since_rollover\" : \"121ms\",\n \"lifecycle\" : { },\n \"generation_time\" : \"121ms\"\n }\n }\n}" + }, + "IndicesExplainDataLifecycleResponseExample2": { + "summary": "Successful response with error messages", + "description": "The API reports any errors related to the lifecycle execution for the target index.", + "value": "{\n \"indices\": {\n \".ds-metrics-2023.03.22-000001\": {\n \"index\" : \".ds-metrics-2023.03.22-000001\",\n \"managed_by_lifecycle\" : true,\n \"index_creation_date_millis\" : 1679475563571,\n \"time_since_index_creation\" : \"843ms\",\n \"lifecycle\" : {\n \"enabled\": true\n },\n \"error\": \"{\\\"type\\\":\\\"validation_exception\\\",\\\"reason\\\":\\\"Validation Failed: 1: this action would add [2] shards, but this cluster\ncurrently has [4]/[3] maximum normal shards open;\\\"}\"\n }\n }\n}" + } } } } @@ -7421,6 +7677,12 @@ "required": [ "data_streams" ] + }, + "examples": { + "IndicesGetDataLifecycleResponseExample1": { + "description": "A successful response from `GET _data_stream/my-data-stream*/_lifecycle?human&pretty`.", + "value": "{\n \"data_streams\": [\n {\n \"name\": \"my-data-stream-1\",\n \"lifecycle\": {\n \"enabled\": true,\n \"data_retention\": \"7d\"\n }\n },\n {\n \"name\": \"my-data-stream-2\",\n \"lifecycle\": {\n \"enabled\": true,\n \"data_retention\": \"7d\"\n }\n }\n ]\n}" + } } } } @@ -7483,6 +7745,17 @@ "application/json": { "schema": { "$ref": "#/components/schemas/indices._types:DataStreamLifecycle" + }, + "examples": { + "IndicesPutDataLifecycleRequestExample1": { + "summary": "Set the data stream lifecycle retention", + "value": "{\n \"data_retention\": \"7d\"\n}" + }, + "IndicesPutDataLifecycleRequestExample2": { + "summary": "Set the data stream lifecycle downsampling", + "description": "This example configures two downsampling rounds.", + "value": "{\n \"downsampling\": [\n {\n \"after\": \"1d\",\n \"fixed_interval\": \"10m\"\n },\n {\n \"after\": \"7d\",\n \"fixed_interval\": \"1d\"\n }\n ]\n}" + } } } }, @@ -8255,6 +8528,12 @@ "aliases", "data_streams" ] + }, + "examples": { + "ResolveIndexResponseExample1": { + "description": "A successful response from `GET /_resolve/index/f*,remoteCluster1:bar*?expand_wildcards=all`.", + "value": "{\n \"indices\": [\n {\n \"name\": \"foo_closed\",\n \"attributes\": [\n \"closed\"\n ]\n },\n {\n \"name\": \"freeze-index\",\n \"aliases\": [\n \"f-alias\"\n ],\n \"attributes\": [\n \"open\"\n ]\n },\n {\n \"name\": \"remoteCluster1:bar-01\",\n \"attributes\": [\n \"open\"\n ]\n }\n ],\n \"aliases\": [\n {\n \"name\": \"f-alias\",\n \"indices\": [\n \"freeze-index\",\n \"my-index-000001\"\n ]\n }\n ],\n \"data_streams\": [\n {\n \"name\": \"foo\",\n \"backing_indices\": [\n \".ds-foo-2099.03.07-000001\"\n ],\n \"timestamp_field\": \"@timestamp\"\n }\n ]\n}" + } } } } @@ -9066,6 +9345,12 @@ "tagline", "version" ] + }, + "examples": { + "RootNodeInfoResponseExample1": { + "description": "A successful response from `GET /`.", + "value": "{\n \"name\": \"instance-0000000000\",\n \"cluster_name\": 
\"my_test_cluster\",\n \"cluster_uuid\": \"5QaxoN0pRZuOmWSxstBBwQ\",\n \"version\": {\n \"build_date\": \"2024-02-01T13:07:13.727175297Z\",\n \"minimum_wire_compatibility_version\": \"7.17.0\",\n \"build_hash\": \"6185ba65d27469afabc9bc951cded6c17c21e3f3\",\n \"number\": \"8.12.1\",\n \"lucene_version\": \"9.9.2\",\n \"minimum_index_compatibility_version\": \"7.0.0\",\n \"build_flavor\": \"default\",\n \"build_snapshot\": false,\n \"build_type\": \"docker\"\n },\n \"tagline\": \"You Know, for Search\"\n}" + } } } } @@ -9206,6 +9491,17 @@ "type": "boolean" } } + }, + "examples": { + "PutPipelineRequestExample1": { + "summary": "Create an ingest pipeline.", + "value": "{\n \"description\" : \"My optional pipeline description\",\n \"processors\" : [\n {\n \"set\" : {\n \"description\" : \"My optional processor description\",\n \"field\": \"my-keyword-field\",\n \"value\": \"foo\"\n }\n }\n ]\n}" + }, + "PutPipelineRequestExample2": { + "summary": "Create an ingest pipeline with metadata.", + "description": "You can use the `_meta` parameter to add arbitrary metadata to a pipeline.", + "value": "{\n \"description\" : \"My optional pipeline description\",\n \"processors\" : [\n {\n \"set\" : {\n \"description\" : \"My optional processor description\",\n \"field\": \"my-keyword-field\",\n \"value\": \"foo\"\n }\n }\n ],\n \"_meta\": {\n \"reason\": \"set my-keyword-field to foo\",\n \"serialization\": {\n \"class\": \"MyPipeline\",\n \"id\": 10\n }\n }\n}" + } } } }, @@ -9490,6 +9786,12 @@ "required": [ "license" ] + }, + "examples": { + "GetLicenseResponseExample1": { + "description": "A successful response from `GET /_license`.", + "value": "{\n \"license\" : {\n \"status\" : \"active\",\n \"uid\" : \"cbff45e7-c553-41f7-ae4f-9205eabd80xx\",\n \"type\" : \"trial\",\n \"issue_date\" : \"2018-10-20T22:05:12.332Z\",\n \"issue_date_in_millis\" : 1540073112332,\n \"expiry_date\" : \"2018-11-19T22:05:12.332Z\",\n \"expiry_date_in_millis\" : 1542665112332,\n \"max_nodes\" : 1000,\n \"max_resource_units\" : null,\n \"issued_to\" : \"test\",\n \"issuer\" : \"elasticsearch\",\n \"start_date_in_millis\" : -1\n }\n}" + } } } } @@ -9548,6 +9850,13 @@ "application/json": { "schema": { "$ref": "#/components/schemas/logstash._types:Pipeline" + }, + "examples": { + "LogstashPutPipelineRequestExample1": { + "summary": "Create a pipeline", + "description": "Run `PUT _logstash/pipeline/my_pipeline` to create a pipeline.", + "value": "{\n \"description\": \"Sample pipeline for illustration purposes\",\n \"last_modified\": \"2021-01-02T02:50:51.250Z\",\n \"pipeline_metadata\": {\n \"type\": \"logstash_pipeline\",\n \"version\": 1\n },\n \"username\": \"elastic\",\n \"pipeline\": \"input {}\\\\n filter { grok {} }\\\\n output {}\",\n \"pipeline_settings\": {\n \"pipeline.workers\": 1,\n \"pipeline.batch.size\": 125,\n \"pipeline.batch.delay\": 50,\n \"queue.type\": \"memory\",\n \"queue.max_bytes\": \"1gb\",\n \"queue.checkpoint.writes\": 1024\n }\n}" + } } } }, @@ -9886,6 +10195,12 @@ "required": [ "closed" ] + }, + "examples": { + "MlCloseJobResponseExample1": { + "description": "A successful response when closing anomaly detection jobs.", + "value": "{\n \"closed\": true\n}" + } } } } @@ -10047,6 +10362,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "MlDeleteCalendarResponseExample1": { + "description": "A successful response when deleting a calendar.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -10093,6 +10414,12 @@ 
"application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "MlDeleteCalendarEventResponseExample1": { + "description": "A successful response when deleting a calendar event.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -10215,6 +10542,12 @@ "calendar_id", "job_ids" ] + }, + "examples": { + "MlDeleteCalendarJobResponseExample1": { + "description": "A successful response when deleting an anomaly detection job from a calendar.", + "value": "{\n \"calendar_id\": \"planned-outages\",\n \"job_ids\": []\n}" + } } } } @@ -10441,6 +10774,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "MlDeleteDataFrameAnalyticsResponseExample1": { + "description": "A successful response when deleting a data frame analytics job.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -10714,6 +11053,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "MlDeleteDatafeedResponseExample1": { + "description": "A successful response when deleting a datafeed.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -10851,6 +11196,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "MlDeleteFilterResponseExample1": { + "description": "A successful response when deleting a filter.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -11014,6 +11365,12 @@ "analysis_config", "data_description" ] + }, + "examples": { + "MlPutJobRequestExample1": { + "description": "A request to create an anomaly detection job and datafeed.", + "value": "{\n \"analysis_config\": {\n \"bucket_span\": \"15m\",\n \"detectors\": [\n {\n \"detector_description\": \"Sum of bytes\",\n \"function\": \"sum\",\n \"field_name\": \"bytes\"\n }\n ]\n },\n \"data_description\": {\n \"time_field\": \"timestamp\",\n \"time_format\": \"epoch_ms\"\n },\n \"analysis_limits\": {\n \"model_memory_limit\": \"11MB\"\n },\n \"model_plot_config\": {\n \"enabled\": true,\n \"annotations_enabled\": true\n },\n \"results_index_name\": \"test-job1\",\n \"datafeed_config\": {\n \"indices\": [\n \"kibana_sample_data_logs\"\n ],\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"match_all\": {}\n }\n ]\n }\n },\n \"runtime_mappings\": {\n \"hour_of_day\": {\n \"type\": \"long\",\n \"script\": {\n \"source\": \"emit(doc['timestamp'].value.getHour());\"\n }\n }\n },\n \"datafeed_id\": \"datafeed-test-job1\"\n }\n}" + } } } }, @@ -11168,6 +11525,18 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "MlDeleteJobResponseExample1": { + "summary": "Delete job", + "description": "A successful response when deleting an anomaly detection job.", + "value": "{\n \"acknowledged\": true\n}" + }, + "MlDeleteJobResponseExample2": { + "summary": "Delete job asynchronously", + "description": "A successful response when deleting an anomaly detection job asynchronously. 
When the `wait_for_completion` query parameter is set to `false`, the response contains an identifier for the job deletion task.\n", + "value": "{\n \"task\": \"oTUltX4IQMOUUVeiohTt8A:39\"\n}" + } } } } @@ -11374,6 +11743,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "MlDeleteTrainedModelResponseExample1": { + "description": "A successful response when deleting an existing trained inference model.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -11476,6 +11851,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "MlDeleteTrainedModelAliasResponseExample1": { + "description": "A successful response when deleting a trained model alias.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -11516,6 +11897,12 @@ } } } + }, + "examples": { + "MlEstimateModelMemoryRequestExample1": { + "description": "Run `POST _ml/anomaly_detectors/_estimate_model_memory` to estimate the model memory limit based on the analysis configuration details provided in the request body.", + "value": "{\n \"analysis_config\": {\n \"bucket_span\": \"5m\",\n \"detectors\": [\n {\n \"function\": \"sum\",\n \"field_name\": \"bytes\",\n \"by_field_name\": \"status\",\n \"partition_field_name\": \"app\"\n }\n ],\n \"influencers\": [\n \"source_ip\",\n \"dest_ip\"\n ]\n },\n \"overall_cardinality\": {\n \"status\": 10,\n \"app\": 50\n },\n \"max_bucket_cardinality\": {\n \"source_ip\": 300,\n \"dest_ip\": 30\n }\n}" + } } } }, @@ -11536,6 +11923,12 @@ "required": [ "model_memory_estimate" ] + }, + "examples": { + "MlEstimateModelMemoryResponseExample1": { + "description": "A successful response from `POST _ml/anomaly_detectors/_estimate_model_memory`.", + "value": "{\n \"model_memory_estimate\": \"21mb\"\n}" + } } } } @@ -11572,6 +11965,33 @@ "evaluation", "index" ] + }, + "examples": { + "MlEvaluateDataFrameRequestExample1": { + "summary": "Classification example 1", + "description": "Run `POST _ml/data_frame/_evaluate` to evaluate a classification job for an annotated index. The `actual_field` contains the ground truth for classification. The `predicted_field` contains the predicted value calculated by the classification analysis.\n", + "value": "{\n \"index\": \"animal_classification\",\n \"evaluation\": {\n \"classification\": {\n \"actual_field\": \"animal_class\",\n \"predicted_field\": \"ml.animal_class_prediction\",\n \"metrics\": {\n \"multiclass_confusion_matrix\": {}\n }\n }\n }\n}" + }, + "MlEvaluateDataFrameRequestExample2": { + "summary": "Classification example 2", + "description": "Run `POST _ml/data_frame/_evaluate` to evaluate a classification job with AUC ROC metrics for an annotated index. The `actual_field` contains the ground truth value for the actual animal classification. This is required in order to evaluate results. 
The `class_name` specifies the class name that is treated as positive during the evaluation; all the other classes are treated as negative.\n", + "value": "{\n \"index\": \"animal_classification\",\n \"evaluation\": {\n \"classification\": {\n \"actual_field\": \"animal_class\",\n \"metrics\": {\n \"auc_roc\": {\n \"class_name\": \"dog\"\n }\n }\n }\n }\n}" + }, + "MlEvaluateDataFrameRequestExample3": { + "summary": "Outlier detection", + "description": "Run `POST _ml/data_frame/_evaluate` to evaluate an outlier detection job for an annotated index.\n", + "value": "{\n \"index\": \"my_analytics_dest_index\",\n \"evaluation\": {\n \"outlier_detection\": {\n \"actual_field\": \"is_outlier\",\n \"predicted_probability_field\": \"ml.outlier_score\"\n }\n }\n}" + }, + "MlEvaluateDataFrameRequestExample4": { + "summary": "Regression example 1", + "description": "Run `POST _ml/data_frame/_evaluate` to evaluate the testing error of a regression job for an annotated index. The term query in the body limits evaluation to be performed on the test split only. The `actual_field` contains the ground truth for house prices. The `predicted_field` contains the house price calculated by the regression analysis.\n", + "value": "{\n \"index\": \"house_price_predictions\",\n \"query\": {\n \"bool\": {\n \"filter\": [\n {\n \"term\": {\n \"ml.is_training\": false\n }\n }\n ]\n }\n },\n \"evaluation\": {\n \"regression\": {\n \"actual_field\": \"price\",\n \"predicted_field\": \"ml.price_prediction\",\n \"metrics\": {\n \"r_squared\": {},\n \"mse\": {},\n \"msle\": {\n \"offset\": 10\n },\n \"huber\": {\n \"delta\": 1.5\n }\n }\n }\n }\n}" + }, + "MlEvaluateDataFrameRequestExample5": { + "summary": "Regression example 2", + "description": "Run `POST _ml/data_frame/_evaluate` to evaluate the training error of a regression job for an annotated index. The term query in the body limits evaluation to be performed on the training split only. The `actual_field` contains the ground truth for house prices. The `predicted_field` contains the house price calculated by the regression analysis.\n", + "value": "{\n \"index\": \"house_price_predictions\",\n \"query\": {\n \"term\": {\n \"ml.is_training\": {\n \"value\": true\n }\n }\n },\n \"evaluation\": {\n \"regression\": {\n \"actual_field\": \"price\",\n \"predicted_field\": \"ml.price_prediction\",\n \"metrics\": {\n \"r_squared\": {},\n \"mse\": {},\n \"msle\": {},\n \"huber\": {}\n }\n }\n }\n}" + } } } }, @@ -11595,6 +12015,23 @@ "$ref": "#/components/schemas/ml.evaluate_data_frame:DataframeRegressionSummary" } } + }, + "examples": { + "MlEvaluateDataFrameResponseExample1": { + "summary": "Classification example 1", + "description": "A successful response from `POST _ml/data_frame/_evaluate` to evaluate a classification analysis job for an annotated index. The `actual_class` contains the name of the class the analysis tried to predict. The `actual_class_doc_count` is the number of documents in the index belonging to the `actual_class`. 
The `predicted_classes` object contains the list of the predicted classes and the number of predictions associated with the class.\n", + "value": "{\n \"classification\": {\n \"multiclass_confusion_matrix\": {\n \"confusion_matrix\": [\n {\n \"actual_class\": \"cat\",\n \"actual_class_doc_count\": 12,\n \"predicted_classes\": [\n {\n \"predicted_class\": \"cat\",\n \"count\": 12\n },\n {\n \"predicted_class\": \"dog\",\n \"count\": 0\n }\n ],\n \"other_predicted_class_doc_count\": 0\n },\n {\n \"actual_class\": \"dog\",\n \"actual_class_doc_count\": 11,\n \"predicted_classes\": [\n {\n \"predicted_class\": \"dog\",\n \"count\": 7\n },\n {\n \"predicted_class\": \"cat\",\n \"count\": 4\n }\n ],\n \"other_predicted_class_doc_count\": 0\n }\n ],\n \"other_actual_class_count\": 0\n }\n }\n}" + }, + "MlEvaluateDataFrameResponseExample2": { + "summary": "Classification example 2", + "description": "A successful response from `POST _ml/data_frame/_evaluate` to evaluate a classification analysis job with the AUC ROC metrics for an annotated index.\n", + "value": "{\n \"classification\": {\n \"auc_roc\": {\n \"value\": 0.8941788639536681\n }\n }\n}" + }, + "MlEvaluateDataFrameResponseExample3": { + "summary": "Outlier detection", + "description": "A successful response from `POST _ml/data_frame/_evaluate` to evaluate an outlier detection job.", + "value": "{\n \"outlier_detection\": {\n \"auc_roc\": {\n \"value\": 0.9258475774641445\n },\n \"confusion_matrix\": {\n \"0.25\": {\n \"tp\": 5,\n \"fp\": 9,\n \"tn\": 204,\n \"fn\": 5\n },\n \"0.5\": {\n \"tp\": 1,\n \"fp\": 5,\n \"tn\": 208,\n \"fn\": 9\n },\n \"0.75\": {\n \"tp\": 0,\n \"fp\": 4,\n \"tn\": 209,\n \"fn\": 10\n }\n },\n \"precision\": {\n \"0.25\": 0.35714285714285715,\n \"0.5\": 0.16666666666666666,\n \"0.75\": 0\n },\n \"recall\": {\n \"0.25\": 0.5,\n \"0.5\": 0.1,\n \"0.75\": 0\n }\n }\n}" + } } } } @@ -12475,6 +12912,12 @@ "$ref": "#/components/schemas/_types:Duration" } } + }, + "examples": { + "MlOpenJobRequestExample1": { + "description": "A request to open anomaly detection jobs. The timeout specifies that the API waits up to 35 minutes for the job to open.\n", + "value": "{\n \"timeout\": \"35m\"\n}" + } } } } @@ -14816,6 +15259,12 @@ "_shards", "id" ] + }, + "examples": { + "OpenPointInTimeResponseExample1": { + "description": "A successful response from `POST /my-index-000001/_pit?keep_alive=1m&allow_partial_search_results=true`. 
It includes a summary of the total number of shards, as well as the number of successful shards when creating the PIT.\n", + "value": "{\n \"id\": \"46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA=\",\n \"_shards\": {\n \"total\": 10,\n \"successful\": 10,\n \"skipped\": 0,\n \"failed\": 0\n }\n}" + } } } } @@ -14940,6 +15389,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/query_rules._types:QueryRule" + }, + "examples": { + "QueryRuleGetResponseExample1": { + "description": "A successful response from `GET _query_rules/my-ruleset/_rule/my-rule1`.", + "value": "{\n \"rule_id\": \"my-rule1\",\n \"type\": \"pinned\",\n \"criteria\": [\n {\n \"type\": \"contains\",\n \"metadata\": \"query_string\",\n \"values\": [\n \"pugs\",\n \"puggles\"\n ]\n }\n ],\n \"actions\": {\n \"ids\": [\n \"id1\",\n \"id2\"\n ]\n }\n}" + } } } } @@ -15013,6 +15468,12 @@ "criteria", "actions" ] + }, + "examples": { + "QueryRulePutRequestExample1": { + "description": "Run `PUT _query_rules/my-ruleset/_rule/my-rule1` to create or update a query rule that pins documents when the `query_string` contains `pugs` or `puggles`.\n", + "value": "{\n \"type\": \"pinned\",\n \"criteria\": [\n {\n \"type\": \"contains\",\n \"metadata\": \"query_string\",\n \"values\": [\n \"pugs\",\n \"puggles\"\n ]\n }\n ],\n \"actions\": {\n \"ids\": [\n \"id1\",\n \"id2\"\n ]\n }\n}" + } } } }, @@ -15114,6 +15575,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/query_rules._types:QueryRuleset" + }, + "examples": { + "QueryRulesetGetResponseExample1": { + "description": "A successful response from `GET _query_rules/my-ruleset/`.", + "value": "{\n \"ruleset_id\": \"my-ruleset\",\n \"rules\": [\n {\n \"rule_id\": \"my-rule1\",\n \"type\": \"pinned\",\n \"criteria\": [\n {\n \"type\": \"contains\",\n \"metadata\": \"query_string\",\n \"values\": [ \"pugs\", \"puggles\" ]\n }\n ],\n \"actions\": {\n \"ids\": [\n \"id1\",\n \"id2\"\n ]\n }\n },\n {\n \"rule_id\": \"my-rule2\",\n \"type\": \"pinned\",\n \"criteria\": [\n {\n \"type\": \"fuzzy\",\n \"metadata\": \"query_string\",\n \"values\": [ \"rescue dogs\" ]\n }\n ],\n \"actions\": {\n \"docs\": [\n {\n \"_index\": \"index1\",\n \"_id\": \"id3\"\n },\n {\n \"_index\": \"index2\",\n \"_id\": \"id4\"\n }\n ]\n }\n }\n ]\n}" + } } } } @@ -15167,6 +15634,12 @@ "required": [ "rules" ] + }, + "examples": { + "QueryRulesetPutRequestExample1": { + "description": "Run `PUT _query_rules/my-ruleset` to create a new query ruleset. Two rules are associated with `my-ruleset`. `my-rule1` will pin documents with IDs `id1` and `id2` when `user_query` contains `pugs` or `puggles` and `user_country` exactly matches `us`. 
`my-rule2` will exclude documents from different specified indices with IDs `id3` and `id4` when the `query_string` fuzzily matches `rescue dogs`.\n", + "value": "{\n \"rules\": [\n {\n \"rule_id\": \"my-rule1\",\n \"type\": \"pinned\",\n \"criteria\": [\n {\n \"type\": \"contains\",\n \"metadata\": \"user_query\",\n \"values\": [ \"pugs\", \"puggles\" ]\n },\n {\n \"type\": \"exact\",\n \"metadata\": \"user_country\",\n \"values\": [ \"us\" ]\n }\n ],\n \"actions\": {\n \"ids\": [\n \"id1\",\n \"id2\"\n ]\n }\n },\n {\n \"rule_id\": \"my-rule2\",\n \"type\": \"pinned\",\n \"criteria\": [\n {\n \"type\": \"fuzzy\",\n \"metadata\": \"user_query\",\n \"values\": [ \"rescue dogs\" ]\n }\n ],\n \"actions\": {\n \"docs\": [\n {\n \"_index\": \"index1\",\n \"_id\": \"id3\"\n },\n {\n \"_index\": \"index2\",\n \"_id\": \"id4\"\n }\n ]\n }\n }\n ]\n}" + } } } }, @@ -15281,6 +15754,12 @@ "count", "results" ] + }, + "examples": { + "QueryRulesetListResponseExample1": { + "description": "A successful response from `GET _query_rules/?from=0&size=3`.", + "value": "{\n \"count\": 3,\n \"results\": [\n {\n \"ruleset_id\": \"ruleset-1\",\n \"rule_total_count\": 1,\n \"rule_criteria_types_counts\": {\n \"exact\": 1\n }\n },\n {\n \"ruleset_id\": \"ruleset-2\",\n \"rule_total_count\": 2,\n \"rule_criteria_types_counts\": {\n \"exact\": 1,\n \"fuzzy\": 1\n }\n },\n {\n \"ruleset_id\": \"ruleset-3\",\n \"rule_total_count\": 3,\n \"rule_criteria_types_counts\": {\n \"exact\": 1,\n \"fuzzy\": 2\n }\n }\n ]\n}" + } } } } @@ -15327,6 +15806,12 @@ "required": [ "match_criteria" ] + }, + "examples": { + "QueryRulesetTestRequestExample1": { + "description": "Run `POST _query_rules/my-ruleset/_test` to test a ruleset. Provide the match criteria that you want to test against.\n", + "value": "{\n \"match_criteria\": {\n \"query_string\": \"puggles\"\n }\n}" + } } } }, @@ -15354,6 +15839,12 @@ "total_matched_rules", "matched_rules" ] + }, + "examples": { + "QueryRulesetTestResponseExample1": { + "description": "A successful response from `POST _query_rules/my-ruleset/_test`.", + "value": "{\n \"total_matched_rules\": 1,\n \"matched_rules\": [\n {\n \"ruleset_id\": \"my-ruleset\",\n \"rule_id\": \"my-rule1\"\n }\n ]\n}" + } } } } @@ -15616,6 +16107,73 @@ "dest", "source" ] + }, + "examples": { + "ReindexRequestExample1": { + "summary": "Reindex multiple sources", + "description": "Run `POST _reindex` to reindex from multiple sources. The `index` attribute in source can be a list, which enables you to copy from many sources in one request. 
This example copies documents from the `my-index-000001` and `my-index-000002` indices.\n", + "value": "{\n \"source\": {\n \"index\": [\"my-index-000001\", \"my-index-000002\"]\n },\n \"dest\": {\n \"index\": \"my-new-index-000002\"\n }\n}" + }, + "ReindexRequestExample10": { + "summary": "Reindex with Painless", + "description": "You can use Painless to reindex daily indices to apply a new template to the existing documents. The script extracts the date from the index name and creates a new index with `-1` appended. For example, all data from `metricbeat-2016.05.31` will be reindexed into `metricbeat-2016.05.31-1`.\n", + "value": "{\n \"source\": {\n \"index\": \"metricbeat-*\"\n },\n \"dest\": {\n \"index\": \"metricbeat\"\n },\n \"script\": {\n \"lang\": \"painless\",\n \"source\": \"ctx._index = 'metricbeat-' + (ctx._index.substring('metricbeat-'.length(), ctx._index.length())) + '-1'\"\n }\n}" + }, + "ReindexRequestExample11": { + "summary": "Reindex a random subset", + "description": "Run `POST _reindex` to extract a random subset of the source for testing. You might need to adjust the `min_score` value depending on the relative amount of data extracted from source.\n", + "value": "{\n \"max_docs\": 10,\n \"source\": {\n \"index\": \"my-index-000001\",\n \"query\": {\n \"function_score\" : {\n \"random_score\" : {},\n \"min_score\" : 0.9\n }\n }\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample12": { + "summary": "Reindex modified documents", + "description": "Run `POST _reindex` to modify documents during reindexing. This example bumps the version of the source document.\n", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\"\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\",\n \"version_type\": \"external\"\n },\n \"script\": {\n \"source\": \"if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}\",\n \"lang\": \"painless\"\n }\n}" + }, + "ReindexRequestExample13": { + "summary": "Reindex from remote on Elastic Cloud", + "description": "When using Elastic Cloud, you can run `POST _reindex` and authenticate against a remote cluster with a username and password.\n", + "value": "{\n \"source\": {\n \"remote\": {\n \"host\": \"http://otherhost:9200\",\n \"username\": \"user\",\n \"password\": \"pass\"\n },\n \"index\": \"my-index-000001\",\n \"query\": {\n \"match\": {\n \"test\": \"data\"\n }\n }\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample2": { + "summary": "Manual slicing", + "description": "Run `POST _reindex` to slice a reindex request manually. Provide a slice ID and total number of slices to each request.\n", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\",\n \"slice\": {\n \"id\": 0,\n \"max\": 2\n }\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample3": { + "summary": "Automatic slicing", + "description": "Run `POST _reindex?slices=5&refresh` to automatically parallelize using sliced scroll to slice on `_id`. The `slices` parameter specifies the number of slices to use.\n", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\"\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample4": { + "summary": "Routing", + "description": "By default, if reindex sees a document with routing, the routing is preserved unless it's changed by the script. You can set `routing` on the `dest` request to change this behavior. 
In this example, run `POST _reindex` to copy all documents from the `source` with the company name `cat` into the `dest` with routing set to `cat`.\n", + "value": "{\n \"source\": {\n \"index\": \"source\",\n \"query\": {\n \"match\": {\n \"company\": \"cat\"\n }\n }\n },\n \"dest\": {\n \"index\": \"dest\",\n \"routing\": \"=cat\"\n }\n}" + }, + "ReindexRequestExample5": { + "summary": "Ingest pipelines", + "description": "Run `POST _reindex` and use the ingest pipelines feature.", + "value": "{\n \"source\": {\n \"index\": \"source\"\n },\n \"dest\": {\n \"index\": \"dest\",\n \"pipeline\": \"some_ingest_pipeline\"\n }\n}" + }, + "ReindexRequestExample6": { + "summary": "Reindex with a query", + "description": "Run `POST _reindex` and add a query to the `source` to limit the documents to reindex. For example, this request copies documents into `my-new-index-000001` only if they have a `user.id` of `kimchy`.\n", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\",\n \"query\": {\n \"term\": {\n \"user.id\": \"kimchy\"\n }\n }\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample7": { + "summary": "Reindex with max_docs", + "description": "You can limit the number of processed documents by setting `max_docs`. For example, run `POST _reindex` to copy a single document from `my-index-000001` to `my-new-index-000001`.\n", + "value": "{\n \"max_docs\": 1,\n \"source\": {\n \"index\": \"my-index-000001\"\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample8": { + "summary": "Reindex selected fields", + "description": "You can use source filtering to reindex a subset of the fields in the original documents. For example, run `POST _reindex` to reindex only the `user.id` and `_doc` fields of each document.\n", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\",\n \"_source\": [\"user.id\", \"_doc\"]\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample9": { + "summary": "Reindex new field names", + "description": "A reindex operation can build a copy of an index with renamed fields. If your index has documents with `text` and `flag` fields, you can change the latter field name to `tag` during the reindex.\n", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\"\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n },\n \"script\": {\n \"source\": \"ctx._source.tag = ctx._source.remove(\\\"flag\\\")\"\n }\n}" + } } } }, @@ -16491,6 +17049,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/search_application._types:SearchApplicationParameters" + }, + "examples": { + "SearchApplicationPutRequestExample1": { + "description": "Run `PUT _application/search_application/my-app` to create or update a search application called `my-app`. When the dictionary parameter is specified, the search application search API will perform the following parameter validation: it accepts only the `query_string` and `default_field` parameters; it verifies that `query_string` and `default_field` are both strings; it accepts `default_field` only if it takes the values `title` or `description`. 
If the parameters are not valid, the search application search API will return an error.\n", + "value": "{\n \"indices\": [ \"index1\", \"index2\" ],\n \"template\": {\n \"script\": {\n \"source\": {\n \"query\": {\n \"query_string\": {\n \"query\": \"{{query_string}}\",\n \"default_field\": \"{{default_field}}\"\n }\n }\n },\n \"params\": {\n \"query_string\": \"*\",\n \"default_field\": \"*\"\n }\n },\n \"dictionary\": {\n \"properties\": {\n \"query_string\": {\n \"type\": \"string\"\n },\n \"default_field\": {\n \"type\": \"string\",\n \"enum\": [\n \"title\",\n \"description\"\n ]\n }\n },\n \"additionalProperties\": false,\n \"required\": [\n \"query_string\"\n ]\n }\n }\n}" + } } } }, @@ -17223,6 +17787,12 @@ "enabled", "authentication_type" ] + }, + "examples": { + "SecurityAuthenticateResponseExample1": { + "description": "A successful response from `GET /_security/_authenticate`.", + "value": "{\n \"username\": \"rdeniro\",\n \"roles\": [\n \"admin\"\n ],\n \"full_name\": null,\n \"email\": null,\n \"metadata\": { },\n \"enabled\": true,\n \"authentication_realm\": {\n \"name\" : \"file\",\n \"type\" : \"file\"\n },\n \"lookup_realm\": {\n \"name\" : \"file\",\n \"type\" : \"file\"\n },\n \"authentication_type\": \"realm\"\n}" + } } } } @@ -17339,6 +17909,18 @@ "required": [ "api_keys" ] + }, + "examples": { + "SecurityGetApiKeyResponseExample1": { + "summary": "Get a key by ID", + "description": "A successful response from `GET /_security/api_key?id=VuaCfGcBCdbkQm-e5aOx&with_limited_by=true`.", + "value": "{\n \"api_keys\": [ \n {\n \"id\": \"VuaCfGcBCdbkQm-e5aOx\", \n \"name\": \"my-api-key\", \n \"creation\": 1548550550158, \n \"expiration\": 1548551550158, \n \"invalidated\": false, \n \"username\": \"myuser\", \n \"realm\": \"native1\", \n \"realm_type\": \"native\",\n \"metadata\": { \n \"application\": \"myapp\"\n },\n \"role_descriptors\": { }, \n \"limited_by\": [ \n {\n \"role-power-user\": {\n \"cluster\": [\n \"monitor\"\n ],\n \"indices\": [\n {\n \"names\": [\n \"*\"\n ],\n \"privileges\": [\n \"read\"\n ],\n \"allow_restricted_indices\": false\n }\n ],\n \"applications\": [ ],\n \"run_as\": [ ],\n \"metadata\": { },\n \"transient_metadata\": {\n \"enabled\": true\n }\n }\n }\n ]\n }\n ]\n}" + }, + "SecurityGetApiKeyResponseExample2": { + "summary": "Get all keys for a user", + "description": "A successful response from `GET /_security/api_key?username=myuser&realm_name=native1`. 
The response contains all API keys for the user `myuser` in the `native1` realm.\n", + "value": "{\n \"api_keys\": [\n {\n \"id\": \"0GF5GXsBCXxz2eDxWwFN\",\n \"name\": \"hadoop_myuser_key\",\n \"creation\": 1548550550158,\n \"expiration\": 1548551550158,\n \"invalidated\": false,\n \"username\": \"myuser\",\n \"realm\": \"native1\",\n \"realm_type\": \"native\",\n \"metadata\": {\n \"application\": \"myapp\"\n },\n \"role_descriptors\": {\n \"role-a\": {\n \"cluster\": [\n \"monitor\"\n ],\n \"indices\": [\n {\n \"names\": [\n \"index-a\"\n ],\n \"privileges\": [\n \"read\"\n ],\n \"allow_restricted_indices\": false\n }\n ],\n \"applications\": [ ],\n \"run_as\": [ ],\n \"metadata\": { },\n \"transient_metadata\": {\n \"enabled\": true\n }\n }\n }\n },\n {\n \"id\": \"6wHJmcQpReKBa42EHV5SBw\",\n \"name\": \"api-key-name-2\",\n \"creation\": 1548550550158,\n \"invalidated\": false,\n \"username\": \"user-y\",\n \"realm\": \"realm-2\",\n \"metadata\": {},\n \"role_descriptors\": { }\n }\n ]\n}" + } } } } @@ -17434,6 +18016,38 @@ "$ref": "#/components/schemas/_types:Username" } } + }, + "examples": { + "SecurityInvalidateApiKeyRequestExample1": { + "summary": "API keys by ID", + "description": "Run `DELETE /_security/api_key` to invalidate the API keys identified by ID.", + "value": "{\n \"ids\" : [ \"VuaCfGcBCdbkQm-e5aOx\" ]\n}" + }, + "SecurityInvalidateApiKeyRequestExample2": { + "summary": "API keys by name", + "description": "Run `DELETE /_security/api_key` to invalidate the API keys identified by name.", + "value": "{\n \"name\" : \"my-api-key\"\n}" + }, + "SecurityInvalidateApiKeyRequestExample3": { + "summary": "API keys by realm", + "description": "Run `DELETE /_security/api_key` to invalidate all API keys for the `native1` realm.", + "value": "{\n \"realm_name\" : \"native1\"\n}" + }, + "SecurityInvalidateApiKeyRequestExample4": { + "summary": "API keys by user", + "description": "Run `DELETE /_security/api_key` to invalidate all API keys for the user `myuser` in all realms.", + "value": "{\n \"username\" : \"myuser\"\n}" + }, + "SecurityInvalidateApiKeyRequestExample5": { + "summary": "API keys by ID and owner", + "description": "Run `DELETE /_security/api_key` to invalidate the API keys identified by ID if they are owned by the currently authenticated user.", + "value": "{\n \"ids\" : [\"VuaCfGcBCdbkQm-e5aOx\"],\n \"owner\" : \"true\"\n}" + }, + "SecurityInvalidateApiKeyRequestExample6": { + "summary": "API keys by user and realm", + "description": "Run `DELETE /_security/api_key` to invalidate all API keys for the user `myuser` in the `native1` realm.", + "value": "{\n \"username\" : \"myuser\",\n \"realm_name\" : \"native1\"\n}" + } } } }, @@ -17478,6 +18092,12 @@ "invalidated_api_keys", "previously_invalidated_api_keys" ] + }, + "examples": { + "SecurityInvalidateApiKeyResponseExample1": { + "description": "A successful response from `DELETE /_security/api_key`.\n", + "value": "{\n \"invalidated_api_keys\": [ \n \"api-key-id-1\"\n ],\n \"previously_invalidated_api_keys\": [ \n \"api-key-id-2\",\n \"api-key-id-3\"\n ],\n \"error_count\": 2, \n \"error_details\": [ \n {\n \"type\": \"exception\",\n \"reason\": \"error occurred while invalidating api keys\",\n \"caused_by\": {\n \"type\": \"illegal_argument_exception\",\n \"reason\": \"invalid api key id\"\n }\n },\n {\n \"type\": \"exception\",\n \"reason\": \"error occurred while invalidating api keys\",\n \"caused_by\": {\n \"type\": \"illegal_argument_exception\",\n \"reason\": \"invalid api key id\"\n }\n }\n ]\n}" + } } } 
} @@ -17605,6 +18225,12 @@ "required": [ "found" ] + }, + "examples": { + "SecurityDeleteRoleResponseExample1": { + "description": "A successful response from `DELETE /_security/role/my_admin_role`. If the role is successfully deleted, `found` is set to `true`.\n", + "value": "{\n \"found\" : true\n}" + } } } } @@ -17650,6 +18276,12 @@ "cluster", "index" ] + }, + "examples": { + "SecurityGetBuiltinPrivilegesResponseExample1": { + "description": "A successful response from `GET /_security/privilege/_builtin`.", + "value": "{\n \"cluster\" : [\n \"all\",\n \"cancel_task\",\n \"create_snapshot\",\n \"cross_cluster_replication\",\n \"cross_cluster_search\",\n \"delegate_pki\",\n \"grant_api_key\",\n \"manage\",\n \"manage_api_key\",\n \"manage_autoscaling\",\n \"manage_behavioral_analytics\",\n \"manage_ccr\",\n \"manage_connector\",\n \"manage_data_frame_transforms\",\n \"manage_data_stream_global_retention\",\n \"manage_enrich\",\n \"manage_ilm\",\n \"manage_index_templates\",\n \"manage_inference\",\n \"manage_ingest_pipelines\",\n \"manage_logstash_pipelines\",\n \"manage_ml\",\n \"manage_oidc\",\n \"manage_own_api_key\",\n \"manage_pipeline\",\n \"manage_rollup\",\n \"manage_saml\",\n \"manage_search_application\",\n \"manage_search_query_rules\",\n \"manage_search_synonyms\",\n \"manage_security\",\n \"manage_service_account\",\n \"manage_slm\",\n \"manage_token\",\n \"manage_transform\",\n \"manage_user_profile\",\n \"manage_watcher\",\n \"monitor\",\n \"monitor_connector\",\n \"monitor_data_frame_transforms\",\n \"monitor_data_stream_global_retention\",\n \"monitor_enrich\",\n \"monitor_inference\",\n \"monitor_ml\",\n \"monitor_rollup\",\n \"monitor_snapshot\",\n \"monitor_stats\",\n \"monitor_text_structure\",\n \"monitor_transform\",\n \"monitor_watcher\",\n \"none\",\n \"post_behavioral_analytics_event\",\n \"read_ccr\",\n \"read_connector_secrets\",\n \"read_fleet_secrets\",\n \"read_ilm\",\n \"read_pipeline\",\n \"read_security\",\n \"read_slm\",\n \"transport_client\",\n \"write_connector_secrets\",\n \"write_fleet_secrets\"\n ],\n \"index\" : [\n \"all\",\n \"auto_configure\",\n \"create\",\n \"create_doc\",\n \"create_index\",\n \"cross_cluster_replication\",\n \"cross_cluster_replication_internal\",\n \"delete\",\n \"delete_index\",\n \"index\",\n \"maintenance\",\n \"manage\",\n \"manage_data_stream_lifecycle\",\n \"manage_follow_index\",\n \"manage_ilm\",\n \"manage_leader_index\",\n \"monitor\",\n \"none\",\n \"read\",\n \"read_cross_cluster\",\n \"view_index_metadata\",\n \"write\"\n ],\n \"remote_cluster\" : [\n \"monitor_enrich\",\n \"monitor_stats\"\n ]\n}" + } } } } @@ -17902,6 +18534,18 @@ "$ref": "#/components/schemas/_types:Duration" } } + }, + "examples": { + "RequestExample1": { + "summary": "Update role and metadata", + "description": "Run `PUT /_security/api_key/VuaCfGcBCdbkQm-e5aOx` to assign new role descriptors and metadata to an API key.\n", + "value": "{\n \"role_descriptors\": {\n \"role-a\": {\n \"indices\": [\n {\n \"names\": [\"*\"],\n \"privileges\": [\"write\"]\n }\n ]\n }\n },\n \"metadata\": {\n \"environment\": {\n \"level\": 2,\n \"trusted\": true,\n \"tags\": [\"production\"]\n }\n }\n}" + }, + "RequestExample2": { + "summary": "Remove permissions", + "description": "Run `PUT /_security/api_key/VuaCfGcBCdbkQm-e5aOx` to remove the API key's previously assigned permissions. 
It will inherit the owner user's full permissions.\n", + "value": "{\n \"role_descriptors\": {}\n}" + } } } } @@ -17922,6 +18566,13 @@ "required": [ "updated" ] + }, + "examples": { + "ResponseExample1": { + "summary": "Update role and metadata", + "description": "A successful response from `PUT /_security/api_key/VuaCfGcBCdbkQm-e5aOx`. The API key's effective permissions after the update will be the intersection of the supplied role descriptors and the owner user's permissions.\n", + "value": "{\n \"updated\": true\n}" + } } } } @@ -17951,6 +18602,12 @@ "required": [ "cursor" ] + }, + "examples": { + "ClearSqlCursorRequestExample1": { + "description": "Run `POST _sql/close` to clear an SQL search cursor.", + "value": "{\n \"cursor\": \"sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWYUpOYklQMHhRUEtld3RsNnFtYU1hQQ==:BAFmBGRhdGUBZgVsaWtlcwFzB21lc3NhZ2UBZgR1c2Vy9f///w8=\"\n}" + } } } }, @@ -18340,6 +18997,12 @@ "count", "synonyms_set" ] + }, + "examples": { + "SynonymsGetResponseExample1": { + "description": "A successful response from `GET _synonyms/my-synonyms-set`.", + "value": "{\n \"count\": 3,\n \"synonyms_set\": [\n {\n \"id\": \"test-1\",\n \"synonyms\": \"hello, hi\"\n },\n {\n \"id\": \"test-2\",\n \"synonyms\": \"bye, goodbye\"\n },\n {\n \"id\": \"test-3\",\n \"synonyms\": \"test => check\"\n }\n ]\n}" + } } } } @@ -18496,6 +19159,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/synonyms._types:SynonymRuleRead" + }, + "examples": { + "SynonymRuleGetResponseExample1": { + "description": "A successful response from `GET _synonyms/my-synonyms-set/test-1`.", + "value": "{\n \"id\": \"test-1\",\n \"synonyms\": \"hello, hi\"\n}" + } } } } @@ -18547,6 +19216,13 @@ "required": [ "synonyms" ] + }, + "examples": { + "SynonymRulePutRequestExample1": { + "summary": "Update a synonym rule", + "description": "Run `PUT _synonyms/my-synonyms-set/test-1` to update a synonym rule.", + "value": "{\n \"synonyms\": \"hello, hi, howdy\"\n}" + } } } }, @@ -18559,6 +19235,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/synonyms._types:SynonymsUpdateResult" + }, + "examples": { + "SynonymRuleResponseExample1": { + "description": "A successful response from `PUT _synonyms/my-synonyms-set/test-1`.\n", + "value": "{\n \"result\": \"updated\",\n \"reload_analyzers_details\": {\n \"_shards\": {\n \"total\": 2,\n \"successful\": 1,\n \"failed\": 0\n },\n \"reload_details\": [\n {\n \"index\": \"test-index\",\n \"reloaded_analyzers\": [\n \"my_search_analyzer\"\n ],\n \"reloaded_node_ids\": [\n \"1wYFZzq8Sxeu_Jvt9mlbkg\"\n ]\n }\n ]\n }\n}" + } } } } @@ -18604,6 +19286,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/synonyms._types:SynonymsUpdateResult" + }, + "examples": { + "SynonymRuleDeleteResponseExample1": { + "description": "A successful response from `DELETE _synonyms/my-synonyms-set/test-1`. 
All analyzers using this synonyms set will be reloaded automatically to reflect the rule being deleted.\n", + "value": "{\n \"result\": \"deleted\",\n \"reload_analyzers_details\": {\n \"_shards\": {\n \"total\": 2,\n \"successful\": 1,\n \"failed\": 0\n },\n \"reload_details\": [\n {\n \"index\": \"test-index\",\n \"reloaded_analyzers\": [\n \"my_search_analyzer\"\n ],\n \"reloaded_node_ids\": [\n \"1wYFZzq8Sxeu_Jvt9mlbkg\"\n ]\n }\n ]\n }\n}" + } } } } @@ -18666,6 +19354,12 @@ "count", "results" ] + }, + "examples": { + "SynonymsSetsGetResponseExample1": { + "description": "A successful response from `GET _synonyms`.", + "value": "{\n \"count\": 3,\n \"results\": [\n {\n \"synonyms_set\": \"ecommerce-synonyms\",\n \"count\": 2\n },\n {\n \"synonyms_set\": \"my-synonyms-set\",\n \"count\": 3\n },\n {\n \"synonyms_set\": \"new-ecommerce-synonyms\",\n \"count\": 1\n }\n ]\n}" + } } } } @@ -19134,6 +19828,18 @@ "dest", "source" ] + }, + "examples": { + "PutTransformRequestExample1": { + "summary": "A pivot transform", + "description": "Run `PUT _transform/ecommerce_transform1` to create a transform that uses the pivot method.", + "value": "{\n \"source\": {\n \"index\": \"kibana_sample_data_ecommerce\",\n \"query\": {\n \"term\": {\n \"geoip.continent_name\": {\n \"value\": \"Asia\"\n }\n }\n }\n },\n \"pivot\": {\n \"group_by\": {\n \"customer_id\": {\n \"terms\": {\n \"field\": \"customer_id\",\n \"missing_bucket\": true\n }\n }\n },\n \"aggregations\": {\n \"max_price\": {\n \"max\": {\n \"field\": \"taxful_total_price\"\n }\n }\n }\n },\n \"description\": \"Maximum priced ecommerce data by customer_id in Asia\",\n \"dest\": {\n \"index\": \"kibana_sample_data_ecommerce_transform1\",\n \"pipeline\": \"add_timestamp_pipeline\"\n },\n \"frequency\": \"5m\",\n \"sync\": {\n \"time\": {\n \"field\": \"order_date\",\n \"delay\": \"60s\"\n }\n },\n \"retention_policy\": {\n \"time\": {\n \"field\": \"order_date\",\n \"max_age\": \"30d\"\n }\n }\n}" + }, + "PutTransformRequestExample2": { + "summary": "A latest transform", + "description": "Run `PUT _transform/ecommerce_transform2` to create a transform that uses the latest method.", + "value": "{\n \"source\": {\n \"index\": \"kibana_sample_data_ecommerce\"\n },\n \"latest\": {\n \"unique_key\": [\n \"customer_id\"\n ],\n \"sort\": \"order_date\"\n },\n \"description\": \"Latest order for each customer\",\n \"dest\": {\n \"index\": \"kibana_sample_data_ecommerce_transform2\"\n },\n \"frequency\": \"5m\",\n \"sync\": {\n \"time\": {\n \"field\": \"order_date\",\n \"delay\": \"60s\"\n }\n }\n}" + } } } }, @@ -19146,6 +19852,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "PutTransformResponseExample1": { + "description": "A successful response when creating a transform.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -19209,6 +19921,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "DeleteTransformResponseExample1": { + "description": "A successful response when the transform is deleted.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -19330,6 +20048,12 @@ "count", "transforms" ] + }, + "examples": { + "GetTransformStatsResponseExample1": { + "description": "A successful response that contains usage information for a transform.", + "value": "{\n \"count\": 1,\n \"transforms\": [\n {\n \"id\": \"ecommerce-customer-transform\",\n \"state\": \"started\",\n \"node\": {\n \"id\": 
\"cpTIGMsVQ8Gqwqlxxxxxxx\",\n \"name\": \"my.home\",\n \"ephemeral_id\": \"5-L21nFsQxxxxxxxxxx-xx\",\n \"transport_address\": \"127.0.0.1:9300\",\n \"attributes\": {}\n },\n \"stats\": {\n \"pages_processed\": 78,\n \"documents_processed\": 6027,\n \"documents_indexed\": 68,\n \"documents_deleted\": 22,\n \"delete_time_in_ms\": 214,\n \"trigger_count\": 168,\n \"index_time_in_ms\": 412,\n \"index_total\": 20,\n \"index_failures\": 0,\n \"search_time_in_ms\": 353,\n \"search_total\": 78,\n \"search_failures\": 0,\n \"processing_time_in_ms\": 8,\n \"processing_total\": 78,\n \"exponential_avg_checkpoint_duration_ms\": 97.30637923893185,\n \"exponential_avg_documents_indexed\": 2.2064915040974062,\n \"exponential_avg_documents_processed\": 179.89419945785045\n },\n \"checkpointing\": {\n \"last\": {\n \"checkpoint\": 20,\n \"timestamp_millis\": 1585344558220,\n \"time_upper_bound_millis\": 1585344498220\n },\n \"changes_last_detected_at\": 1585344558219\n },\n \"health\": {\n \"status\": \"green\"\n }\n }\n ]\n}" + } } } } @@ -19484,6 +20208,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "ResetTransformResponseExample1": { + "description": "A successful response when the transform is reset.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -19530,6 +20260,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "ScheduleNowTransformResponseExample1": { + "description": "A successful response when the transform is scheduled to run now.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -19586,6 +20322,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "StartTransformResponseExample1": { + "description": "A successful response when a transform starts.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -19672,6 +20414,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:AcknowledgedResponseBase" + }, + "examples": { + "StopTransformResponseExample1": { + "description": "A successful response when a transform stops.", + "value": "{\n \"acknowledged\": true\n}" + } } } } @@ -19762,6 +20510,12 @@ ] } } + }, + "examples": { + "UpdateTransformRequestExample1": { + "description": "Run `POST _transform/simple-kibana-ecomm-pivot/_update` to update a transform that uses the pivot method.", + "value": "{\n \"source\": {\n \"index\": \"kibana_sample_data_ecommerce\",\n \"query\": {\n \"term\": {\n \"geoip.continent_name\": {\n \"value\": \"Asia\"\n }\n }\n }\n },\n \"pivot\": {\n \"group_by\": {\n \"customer_id\": {\n \"terms\": {\n \"field\": \"customer_id\",\n \"missing_bucket\": true\n }\n }\n },\n \"aggregations\": {\n \"max_price\": {\n \"max\": {\n \"field\": \"taxful_total_price\"\n }\n }\n }\n },\n \"description\": \"Maximum priced ecommerce data by customer_id in Asia\",\n \"dest\": {\n \"index\": \"kibana_sample_data_ecommerce_transform1\",\n \"pipeline\": \"add_timestamp_pipeline\"\n },\n \"frequency\": \"5m\",\n \"sync\": {\n \"time\": {\n \"field\": \"order_date\",\n \"delay\": \"60s\"\n }\n },\n \"retention_policy\": {\n \"time\": {\n \"field\": \"order_date\",\n \"max_age\": \"30d\"\n }\n }\n}" + } } } }, @@ -19827,6 +20581,12 @@ "source", "version" ] + }, + "examples": { + "UpdateTransformResponseExample1": { + "description": "A successful response when creating a transform.", + "value": "{\n \"id\": 
\"simple-kibana-ecomm-pivot\",\n \"authorization\": {\n \"roles\": [\n \"superuser\"\n ]\n },\n \"version\": \"10.0.0\",\n \"create_time\": 1712951576767,\n \"source\": {\n \"index\": [\n \"kibana_sample_data_ecommerce\"\n ],\n \"query\": {\n \"term\": {\n \"geoip.continent_name\": {\n \"value\": \"Asia\"\n }\n }\n }\n },\n \"dest\": {\n \"index\": \"kibana_sample_data_ecommerce_transform_v2\",\n \"pipeline\": \"add_timestamp_pipeline\"\n },\n \"frequency\": \"15m\",\n \"sync\": {\n \"time\": {\n \"field\": \"order_date\",\n \"delay\": \"120s\"\n }\n },\n \"pivot\": {\n \"group_by\": {\n \"customer_id\": {\n \"terms\": {\n \"field\": \"customer_id\",\n \"missing_bucket\": true\n }\n }\n },\n \"aggregations\": {\n \"max_price\": {\n \"max\": {\n \"field\": \"taxful_total_price\"\n }\n }\n }\n },\n \"description\": \"Maximum priced ecommerce data by customer_id in Asia\",\n \"settings\": {},\n \"retention_policy\": {\n \"time\": {\n \"field\": \"order_date\",\n \"max_age\": \"30d\"\n }\n }\n}" + } } } } @@ -20030,6 +20790,63 @@ "type": "object" } } + }, + "examples": { + "UpdateRequestExample1": { + "summary": "Update a counter with a script", + "description": "Run `POST test/_update/1` to increment a counter by using a script.", + "value": "{\n \"script\" : {\n \"source\": \"ctx._source.counter += params.count\",\n \"lang\": \"painless\",\n \"params\" : {\n \"count\" : 4\n }\n }\n}" + }, + "UpdateRequestExample10": { + "summary": "Scripted upsert", + "description": "Run `POST test/_update/1` to perform a scripted upsert. When `scripted_upsert` is `true`, the script runs whether or not the document exists.\n", + "value": "{\n \"scripted_upsert\": true,\n \"script\": {\n \"source\": \"\"\"\n if ( ctx.op == 'create' ) {\n ctx._source.counter = params.count\n } else {\n ctx._source.counter += params.count\n }\n \"\"\",\n \"params\": {\n \"count\": 4\n }\n },\n \"upsert\": {}\n}" + }, + "UpdateRequestExample11": { + "summary": "Doc as upsert", + "description": "Run `POST test/_update/1` to perform a doc as upsert. Instead of sending a partial `doc` plus an `upsert` doc, you can set `doc_as_upsert` to `true` to use the contents of `doc` as the `upsert` value.\n", + "value": "{\n \"doc\": {\n \"name\": \"new_name\"\n },\n \"doc_as_upsert\": true\n}" + }, + "UpdateRequestExample2": { + "summary": "Add a tag with a script", + "description": "Run `POST test/_update/1` to use a script to add a tag to a list of tags. In this example, it is just a list, so the tag is added even it exists.\n", + "value": "{\n \"script\": {\n \"source\": \"ctx._source.tags.add(params.tag)\",\n \"lang\": \"painless\",\n \"params\": {\n \"tag\": \"blue\"\n }\n }\n}" + }, + "UpdateRequestExample3": { + "summary": "Remove a tag with a script", + "description": "Run `POST test/_update/1` to use a script to remove a tag from a list of tags. The Painless function to remove a tag takes the array index of the element you want to remove. To avoid a possible runtime error, you first need to make sure the tag exists. 
If the list contains duplicates of the tag, this script just removes one occurrence.\n", + "value": "{\n \"script\": {\n \"source\": \"if (ctx._source.tags.contains(params.tag)) { ctx._source.tags.remove(ctx._source.tags.indexOf(params.tag)) }\",\n \"lang\": \"painless\",\n \"params\": {\n \"tag\": \"blue\"\n }\n }\n}" + }, + "UpdateRequestExample4": { + "summary": "Add fields with a script", + "description": "Run `POST test/_update/1` to use a script to add a field `new_field` to the document.\n", + "value": "{\n \"script\" : \"ctx._source.new_field = 'value_of_new_field'\"\n}" + }, + "UpdateRequestExample5": { + "summary": "Remove fields with a script", + "description": "Run `POST test/_update/1` to use a script to remove a field `new_field` from the document.\n", + "value": "{\n \"script\" : \"ctx._source.remove('new_field')\"\n}" + }, + "UpdateRequestExample6": { + "summary": "Remove subfields with a script", + "description": "Run `POST test/_update/1` to use a script to remove a subfield from an object field.\n", + "value": "{\n \"script\": \"ctx._source['my-object'].remove('my-subfield')\"\n}" + }, + "UpdateRequestExample7": { + "summary": "Change the operation with a script", + "description": "Run `POST test/_update/1` to change the operation that runs from within the script. For example, this request deletes the document if the `tags` field contains `green`, otherwise it does nothing (`noop`).\n", + "value": "{\n \"script\": {\n \"source\": \"if (ctx._source.tags.contains(params.tag)) { ctx.op = 'delete' } else { ctx.op = 'noop' }\",\n \"lang\": \"painless\",\n \"params\": {\n \"tag\": \"green\"\n }\n }\n}" + }, + "UpdateRequestExample8": { + "summary": "Update part of a document", + "description": "Run `POST test/_update/1` to do a partial update that adds a new field to the existing document.\n", + "value": "{\n \"doc\": {\n \"name\": \"new_name\"\n }\n}" + }, + "UpdateRequestExample9": { + "summary": "Upsert", + "description": "Run `POST test/_update/1` to perform an upsert. If the document does not already exist, the contents of the upsert element are inserted as a new document. If the document exists, the script is run.\n", + "value": "{\n \"script\": {\n \"source\": \"ctx._source.counter += params.count\",\n \"lang\": \"painless\",\n \"params\": {\n \"count\": 4\n }\n },\n \"upsert\": {\n \"counter\": 1\n }\n}" + } } } }, @@ -20042,6 +20859,13 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_global.update:UpdateWriteResponseBase" + }, + "examples": { + "UpdateResponseExample1": { + "summary": "Detect noop updates", + "description": "By default, updates that don't change anything are detected and return `\"result\": \"noop\"`.\n", + "value": "{\n \"_shards\": {\n \"total\": 0,\n \"successful\": 0,\n \"failed\": 0\n },\n \"_index\": \"test\",\n \"_id\": \"1\",\n \"_version\": 2,\n \"_primary_term\": 1,\n \"_seq_no\": 1,\n \"result\": \"noop\"\n}" + } } } } @@ -20409,6 +21233,28 @@ "$ref": "#/components/schemas/_types:Conflicts" } } + }, + "examples": { + "UpdateByQueryRequestExample1": { + "summary": "Update selected documents", + "description": "Run `POST my-index-000001/_update_by_query?conflicts=proceed` to update documents that match a query.\n", + "value": "{\n \"query\": { \n \"term\": {\n \"user.id\": \"kimchy\"\n }\n }\n}" + }, + "UpdateByQueryRequestExample2": { + "summary": "Update the document source", + "description": "Run `POST my-index-000001/_update_by_query` with a script to update the document source. 
It increments the `count` field for all documents with a `user.id` of `kimchy` in `my-index-000001`.\n", + "value": "{\n \"script\": {\n \"source\": \"ctx._source.count++\",\n \"lang\": \"painless\"\n },\n \"query\": {\n \"term\": {\n \"user.id\": \"kimchy\"\n }\n }\n}" + }, + "UpdateByQueryRequestExample3": { + "summary": "Slice manually", + "description": "Run `POST my-index-000001/_update_by_query` to slice an update by query manually. Provide a slice ID and total number of slices to each request.\n", + "value": "{\n \"slice\": {\n \"id\": 0,\n \"max\": 2\n },\n \"script\": {\n \"source\": \"ctx._source['extra'] = 'test'\"\n }\n}" + }, + "UpdateByQueryRequestExample4": { + "summary": "Slice automatically", + "description": "Run `POST my-index-000001/_update_by_query?refresh&slices=5` to use automatic slicing. It automatically parallelizes using sliced scroll to slice on `_id`.\n", + "value": "{\n \"script\": {\n \"source\": \"ctx._source['extra'] = 'test'\"\n }\n}" + } } } } @@ -56317,6 +57163,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/async_search._types:AsyncSearchDocumentResponseBase" + }, + "examples": { + "AsyncSearchSubmitResponseExample1": { + "description": "A successful response when performing a search asynchronously.", + "value": "{\n \"id\" : \"FmRldE8zREVEUzA2ZVpUeGs2ejJFUFEaMkZ5QTVrSTZSaVN3WlNFVmtlWHJsdzoxMDc=\",\n \"is_partial\" : true,\n \"is_running\" : true,\n \"start_time_in_millis\" : 1583945890986,\n \"expiration_time_in_millis\" : 1584377890986,\n \"response\" : {\n \"took\" : 1122,\n \"timed_out\" : false,\n \"num_reduce_phases\" : 0,\n \"_shards\" : {\n \"total\" : 562,\n \"successful\" : 3,\n \"skipped\" : 0,\n \"failed\" : 0\n },\n \"hits\" : {\n \"total\" : {\n \"value\" : 157483,\n \"relation\" : \"gte\"\n },\n \"max_score\" : null,\n \"hits\" : [ ]\n }\n }\n}" + } } } } @@ -56357,6 +57209,22 @@ "items", "took" ] + }, + "examples": { + "BulkResponseExample1": { + "summary": "Multiple successful operations", + "value": "{\n \"took\": 30,\n \"errors\": false,\n \"items\": [\n {\n \"index\": {\n \"_index\": \"test\",\n \"_id\": \"1\",\n \"_version\": 1,\n \"result\": \"created\",\n \"_shards\": {\n \"total\": 2,\n \"successful\": 1,\n \"failed\": 0\n },\n \"status\": 201,\n \"_seq_no\" : 0,\n \"_primary_term\": 1\n }\n },\n {\n \"delete\": {\n \"_index\": \"test\",\n \"_id\": \"2\",\n \"_version\": 1,\n \"result\": \"not_found\",\n \"_shards\": {\n \"total\": 2,\n \"successful\": 1,\n \"failed\": 0\n },\n \"status\": 404,\n \"_seq_no\" : 1,\n \"_primary_term\" : 2\n }\n },\n {\n \"create\": {\n \"_index\": \"test\",\n \"_id\": \"3\",\n \"_version\": 1,\n \"result\": \"created\",\n \"_shards\": {\n \"total\": 2,\n \"successful\": 1,\n \"failed\": 0\n },\n \"status\": 201,\n \"_seq_no\" : 2,\n \"_primary_term\" : 3\n }\n },\n {\n \"update\": {\n \"_index\": \"test\",\n \"_id\": \"1\",\n \"_version\": 2,\n \"result\": \"updated\",\n \"_shards\": {\n \"total\": 2,\n \"successful\": 1,\n \"failed\": 0\n },\n \"status\": 200,\n \"_seq_no\" : 3,\n \"_primary_term\" : 4\n }\n }\n ]\n}" + }, + "BulkResponseExample2": { + "summary": "Failed actions", + "description": "If you run `POST /_bulk` with operations that update non-existent documents, the operations cannot complete successfully. The API returns a response with an `errors` property whose value is `true`. The response also includes an error object for any failed operations. 
The error object contains additional information about the failure, such as the error type and reason.\n", + "value": "{\n \"took\": 486,\n \"errors\": true,\n \"items\": [\n {\n \"update\": {\n \"_index\": \"index1\",\n \"_id\": \"5\",\n \"status\": 404,\n \"error\": {\n \"type\": \"document_missing_exception\",\n \"reason\": \"[5]: document missing\",\n \"index_uuid\": \"aAsFqTI0Tc2W0LCWgPNrOA\",\n \"shard\": \"0\",\n \"index\": \"index1\"\n }\n }\n },\n {\n \"update\": {\n \"_index\": \"index1\",\n \"_id\": \"6\",\n \"status\": 404,\n \"error\": {\n \"type\": \"document_missing_exception\",\n \"reason\": \"[6]: document missing\",\n \"index_uuid\": \"aAsFqTI0Tc2W0LCWgPNrOA\",\n \"shard\": \"0\",\n \"index\": \"index1\"\n }\n }\n },\n {\n \"create\": {\n \"_index\": \"index1\",\n \"_id\": \"7\",\n \"_version\": 1,\n \"result\": \"created\",\n \"_shards\": {\n \"total\": 2,\n \"successful\": 1,\n \"failed\": 0\n },\n \"_seq_no\": 0,\n \"_primary_term\": 1,\n \"status\": 201\n }\n }\n ]\n}" + }, + "BulkResponseExample3": { + "summary": "Filter for failed operations", + "description": "An example response from `POST /_bulk?filter_path=items.*.error`, which returns only information about failed operations.\n", + "value": "{\n \"items\": [\n {\n \"update\": {\n \"error\": {\n \"type\": \"document_missing_exception\",\n \"reason\": \"[5]: document missing\",\n \"index_uuid\": \"aAsFqTI0Tc2W0LCWgPNrOA\",\n \"shard\": \"0\",\n \"index\": \"index1\"\n }\n }\n },\n {\n \"update\": {\n \"error\": {\n \"type\": \"document_missing_exception\",\n \"reason\": \"[6]: document missing\",\n \"index_uuid\": \"aAsFqTI0Tc2W0LCWgPNrOA\",\n \"shard\": \"0\",\n \"index\": \"index1\"\n }\n }\n }\n ]\n}" + } } } } @@ -56551,6 +57419,11 @@ "result", "id" ] + }, + "examples": { + "ConnectorPutResponseExample1": { + "value": "{\n \"result\": \"created\",\n \"id\": \"my-connector\"\n}" + } } } } @@ -56614,6 +57487,13 @@ "application/json": { "schema": { "$ref": "#/components/schemas/eql._types:EqlSearchResponseBase" + }, + "examples": { + "EqlSearchResponseExample2": { + "summary": "A successful response for performing a search with an EQL query.", + "description": "", + "value": "{\n \"is_partial\": false,\n \"is_running\": false,\n \"took\": 6,\n \"timed_out\": false,\n \"hits\": {\n \"total\": {\n \"value\": 1,\n \"relation\": \"eq\"\n },\n \"sequences\": [\n {\n \"join_keys\": [\n 2012\n ],\n \"events\": [\n {\n \"_index\": \".ds-my-data-stream-2099.12.07-000001\",\n \"_id\": \"AtOJ4UjUBAAx3XR5kcCM\",\n \"_source\": {\n \"@timestamp\": \"2099-12-06T11:04:07.000Z\",\n \"event\": {\n \"category\": \"file\",\n \"id\": \"dGCHwoeS\",\n \"sequence\": 2\n },\n \"file\": {\n \"accessed\": \"2099-12-07T11:07:08.000Z\",\n \"name\": \"cmd.exe\",\n \"path\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\",\n \"type\": \"file\",\n \"size\": 16384\n },\n \"process\": {\n \"pid\": 2012,\n \"name\": \"cmd.exe\",\n \"executable\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\"\n }\n }\n },\n {\n \"_index\": \".ds-my-data-stream-2099.12.07-000001\",\n \"_id\": \"OQmfCaduce8zoHT93o4H\",\n \"_source\": {\n \"@timestamp\": \"2099-12-07T11:07:09.000Z\",\n \"event\": {\n \"category\": \"process\",\n \"id\": \"aR3NWVOs\",\n \"sequence\": 4\n },\n \"process\": {\n \"pid\": 2012,\n \"name\": \"regsvr32.exe\",\n \"command_line\": \"regsvr32.exe /s /u /i:https://...RegSvr32.sct scrobj.dll\",\n \"executable\": \"C:\\\\Windows\\\\System32\\\\regsvr32.exe\"\n }\n }\n }\n ]\n }\n ]\n }\n}" + } } } } @@ -56646,6 +57526,12 @@ "_id", "matched" ] + }, + 
"examples": { + "ExplainResponseExample1": { + "description": "A successful response from `GET /my-index-000001/_explain/0`.", + "value": "{\n \"_index\":\"my-index-000001\",\n \"_id\":\"0\",\n \"matched\":true,\n \"explanation\":{\n \"value\":1.6943598,\n \"description\":\"weight(message:elasticsearch in 0) [PerFieldSimilarity], result of:\",\n \"details\":[\n {\n \"value\":1.6943598,\n \"description\":\"score(freq=1.0), computed as boost * idf * tf from:\",\n \"details\":[\n {\n \"value\":2.2,\n \"description\":\"boost\",\n \"details\":[]\n },\n {\n \"value\":1.3862944,\n \"description\":\"idf, computed as log(1 + (N - n + 0.5) / (n + 0.5)) from:\",\n \"details\":[\n {\n \"value\":1,\n \"description\":\"n, number of documents containing term\",\n \"details\":[]\n },\n {\n \"value\":5,\n \"description\":\"N, total number of documents with field\",\n \"details\":[]\n }\n ]\n },\n {\n \"value\":0.5555556,\n \"description\":\"tf, computed as freq / (freq + k1 * (1 - b + b * dl / avgdl)) from:\",\n \"details\":[\n {\n \"value\":1.0,\n \"description\":\"freq, occurrences of term within document\",\n \"details\":[]\n },\n {\n \"value\":1.2,\n \"description\":\"k1, term saturation parameter\",\n \"details\":[]\n },\n {\n \"value\":0.75,\n \"description\":\"b, length normalization parameter\",\n \"details\":[]\n },\n {\n \"value\":3.0,\n \"description\":\"dl, length of field\",\n \"details\":[]\n },\n {\n \"value\":5.4,\n \"description\":\"avgdl, average length of field\",\n \"details\":[]\n }\n ]\n }\n ]\n }\n ]\n }\n}" + } } } } @@ -56674,6 +57560,18 @@ "indices", "fields" ] + }, + "examples": { + "FieldCapabilitiesResponseExample1": { + "summary": "Get two fields", + "description": "A successful response from `GET _field_caps?fields=rating,title`. The field `rating` is defined as a long in `index1` and `index2` and as a `keyword` in `index3` and `index4`. The field `rating` is not aggregatable in `index1`. The field `rating` is not searchable in `index4`. The field `title` is defined as text in all indices.\n", + "value": "{\n \"indices\": [ \"index1\", \"index2\", \"index3\", \"index4\", \"index5\" ],\n \"fields\": {\n \"rating\": { \n \"long\": {\n \"metadata_field\": false,\n \"searchable\": true,\n \"aggregatable\": false,\n \"indices\": [ \"index1\", \"index2\" ],\n \"non_aggregatable_indices\": [ \"index1\" ] \n },\n \"keyword\": {\n \"metadata_field\": false,\n \"searchable\": false,\n \"aggregatable\": true,\n \"indices\": [ \"index3\", \"index4\" ],\n \"non_searchable_indices\": [ \"index4\" ] \n }\n },\n \"title\": { \n \"text\": {\n \"metadata_field\": false,\n \"searchable\": true,\n \"aggregatable\": false\n }\n }\n }\n}" + }, + "FieldCapabilitiesResponseExample2": { + "summary": "Get unmapped fields", + "description": "A successful response from `GET _field_caps?fields=rating,title&include_unmapped`. The response contains an entry for each field that is present in some indices but not all. 
For example, the `rating` and `title` fields are unmapped in `index5`.\n", + "value": "{\n \"indices\": [ \"index1\", \"index2\", \"index3\", \"index4\", \"index5\" ],\n \"fields\": {\n \"rating\": { \n \"long\": {\n \"metadata_field\": false,\n \"searchable\": true,\n \"aggregatable\": false,\n \"indices\": [ \"index1\", \"index2\" ],\n \"non_aggregatable_indices\": [ \"index1\" ] \n },\n \"keyword\": {\n \"metadata_field\": false,\n \"searchable\": false,\n \"aggregatable\": true,\n \"indices\": [ \"index3\", \"index4\" ],\n \"non_searchable_indices\": [ \"index4\" ] \n }\n },\n \"title\": { \n \"text\": {\n \"metadata_field\": false,\n \"searchable\": true,\n \"aggregatable\": false\n }\n }\n }\n}" + } } } } @@ -56727,6 +57625,18 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:WriteResponseBase" + }, + "examples": { + "IndexResponseExample1": { + "summary": "Automate document IDs", + "description": "A successful response from `POST my-index-000001/_doc/`, which contains an automatically generated document ID.", + "value": "{\n \"_shards\": {\n \"total\": 2,\n \"failed\": 0,\n \"successful\": 2\n },\n \"_index\": \"my-index-000001\",\n \"_id\": \"W0tpsmIBdwcYyG50zbta\",\n \"_version\": 1,\n \"_seq_no\": 0,\n \"_primary_term\": 1,\n \"result\": \"created\"\n}" + }, + "IndexResponseExample2": { + "summary": "Define document IDs", + "description": "A successful response from `PUT my-index-000001/_doc/1`.", + "value": "{\n \"_shards\": {\n \"total\": 2,\n \"failed\": 0,\n \"successful\": 2\n },\n \"_index\": \"my-index-000001\",\n \"_id\": \"1\",\n \"_version\": 1,\n \"_seq_no\": 0,\n \"_primary_term\": 1,\n \"result\": \"created\"\n}" + } } } } @@ -57035,6 +57945,28 @@ "application/json": { "schema": { "$ref": "#/components/schemas/inference._types:InferenceResult" + }, + "examples": { + "InferenceResponseExample1": { + "summary": "Completion task", + "description": "A successful response from `POST _inference/completion/openai_chat_completions`.\n", + "value": "{\n \"completion\": [\n {\n \"result\": \"Elastic is a company that provides a range of software solutions for search, logging, security, and analytics. Their flagship product is Elasticsearch, an open-source, distributed search engine that allows users to search, analyze, and visualize large volumes of data in real-time. 
Elastic also offers products such as Kibana, a data visualization tool, and Logstash, a log management and pipeline tool, as well as various other tools and solutions for data analysis and management.\"\n }\n ]\n}" + }, + "InferenceResponseExample2": { + "summary": "Rerank task", + "description": "A successful response from `POST _inference/rerank/cohere_rerank`.\n", + "value": "{\n \"rerank\": [\n {\n \"index\": \"2\",\n \"relevance_score\": \"0.011597361\",\n \"text\": \"leia\"\n },\n {\n \"index\": \"0\",\n \"relevance_score\": \"0.006338922\",\n \"text\": \"luke\"\n },\n {\n \"index\": \"5\",\n \"relevance_score\": \"0.0016166499\",\n \"text\": \"star\"\n },\n {\n \"index\": \"4\",\n \"relevance_score\": \"0.0011695103\",\n \"text\": \"r2d2\"\n },\n {\n \"index\": \"1\",\n \"relevance_score\": \"5.614787E-4\",\n \"text\": \"like\"\n },\n {\n \"index\": \"6\",\n \"relevance_score\": \"3.7850367E-4\",\n \"text\": \"wars\"\n },\n {\n \"index\": \"3\",\n \"relevance_score\": \"1.2508839E-5\",\n \"text\": \"chewy\"\n }\n ]\n}" + }, + "InferenceResponseExample3": { + "summary": "Sparse embedding task", + "description": "An abbreviated response from `POST _inference/sparse_embedding/my-elser-model`.\n", + "value": "{\n \"sparse_embedding\": [\n {\n \"port\": 2.1259406,\n \"sky\": 1.7073475,\n \"color\": 1.6922266,\n \"dead\": 1.6247464,\n \"television\": 1.3525393,\n \"above\": 1.2425821,\n \"tuned\": 1.1440028,\n \"colors\": 1.1218185,\n \"tv\": 1.0111054,\n \"ports\": 1.0067928,\n \"poem\": 1.0042328,\n \"channel\": 0.99471164,\n \"tune\": 0.96235967,\n \"scene\": 0.9020516\n }\n ]\n}" + }, + "InferenceResponseExample4": { + "summary": "Text embedding task", + "description": "An abbreviated response from `POST _inference/text_embedding/my-cohere-endpoint`.\n", + "value": "{\n \"text_embedding\": [\n {\n \"embedding\": [\n 0.018569946,\n -0.036895752,\n 0.01486969,\n -0.0045204163,\n -0.04385376,\n 0.0075950623,\n 0.04260254,\n -0.004005432,\n 0.007865906,\n 0.030792236,\n -0.050476074,\n 0.011795044,\n -0.011642456,\n -0.010070801\n ]\n }\n ]\n}" + } } } } @@ -57089,6 +58021,12 @@ "required": [ "docs" ] + }, + "examples": { + "SimulatePipelineResponseExample1": { + "description": "A successful response for running an ingest pipeline against a set of provided documents.", + "value": "{\n \"docs\": [\n {\n \"doc\": {\n \"_id\": \"id\",\n \"_index\": \"index\",\n \"_version\": \"-3\",\n \"_source\": {\n \"field2\": \"_value\",\n \"foo\": \"bar\"\n },\n \"_ingest\": {\n \"timestamp\": \"2017-05-04T22:30:03.187Z\"\n }\n }\n },\n {\n \"doc\": {\n \"_id\": \"id\",\n \"_index\": \"index\",\n \"_version\": \"-3\",\n \"_source\": {\n \"field2\": \"_value\",\n \"foo\": \"rab\"\n },\n \"_ingest\": {\n \"timestamp\": \"2017-05-04T22:30:03.188Z\"\n }\n }\n }\n ]\n}" + } } } } @@ -57588,6 +58526,23 @@ "required": [ "result" ] + }, + "examples": { + "ExecutePainlessScriptResponseExample1": { + "summary": "Test context", + "description": "A successful response from `POST /_scripts/painless/_execute` with a `painless_test` context.", + "value": "{\n \"result\": \"0.1\"\n}" + }, + "ExecutePainlessScriptResponseExample2": { + "summary": "Filter context", + "description": "A successful response from `POST /_scripts/painless/_execute` with a `filter` context.", + "value": "{\n \"result\": true\n}" + }, + "ExecutePainlessScriptResponseExample3": { + "summary": "Score context", + "description": "A successful response from `POST /_scripts/painless/_execute` with a `score` context.", + "value": "{\n 
\"result\": 0.8\n}" + } } } } @@ -57641,6 +58596,12 @@ "application/json": { "schema": { "$ref": "#/components/schemas/_types:MapboxVectorTiles" + }, + "examples": { + "SearchMvtResponseExample1": { + "description": "A successful response from `GET museums/_mvt/location/13/4207/2692`. It returns results as a binary vector tile. When decoded into JSON, the tile contains the following data.\n", + "value": "{\n \"hits\": {\n \"extent\": 4096,\n \"version\": 2,\n \"features\": [\n {\n \"geometry\": {\n \"type\": \"Point\",\n \"coordinates\": [\n 3208,\n 3864\n ]\n },\n \"properties\": {\n \"_id\": \"1\",\n \"_index\": \"museums\",\n \"name\": \"NEMO Science Museum\",\n \"price\": 1750\n },\n \"type\": 1\n },\n {\n \"geometry\": {\n \"type\": \"Point\",\n \"coordinates\": [\n 3429,\n 3496\n ]\n },\n \"properties\": {\n \"_id\": \"3\",\n \"_index\": \"museums\",\n \"name\": \"Nederlands Scheepvaartmuseum\",\n \"price\": 1650\n },\n \"type\": 1\n },\n {\n \"geometry\": {\n \"type\": \"Point\",\n \"coordinates\": [\n 3429,\n 3496\n ]\n },\n \"properties\": {\n \"_id\": \"4\",\n \"_index\": \"museums\",\n \"name\": \"Amsterdam Centre for Architecture\",\n \"price\": 0\n },\n \"type\": 1\n }\n ]\n },\n \"aggs\": {\n \"extent\": 4096,\n \"version\": 2,\n \"features\": [\n {\n \"geometry\": {\n \"type\": \"Polygon\",\n \"coordinates\": [\n [\n [\n 3072,\n 3072\n ],\n [\n 4096,\n 3072\n ],\n [\n 4096,\n 4096\n ],\n [\n 3072,\n 4096\n ],\n [\n 3072,\n 3072\n ]\n ]\n ]\n },\n \"properties\": {\n \"_count\": 3,\n \"max_price.value\": 1750.0,\n \"min_price.value\": 0.0,\n \"avg_price.value\": 1133.3333333333333\n },\n \"type\": 3\n }\n ]\n },\n \"meta\": {\n \"extent\": 4096,\n \"version\": 2,\n \"features\": [\n {\n \"geometry\": {\n \"type\": \"Polygon\",\n \"coordinates\": [\n [\n [\n 0,\n 0\n ],\n [\n 4096,\n 0\n ],\n [\n 4096,\n 4096\n ],\n [\n 0,\n 4096\n ],\n [\n 0,\n 0\n ]\n ]\n ]\n },\n \"properties\": {\n \"_shards.failed\": 0,\n \"_shards.skipped\": 0,\n \"_shards.successful\": 1,\n \"_shards.total\": 1,\n \"aggregations._count.avg\": 3.0,\n \"aggregations._count.count\": 1,\n \"aggregations._count.max\": 3.0,\n \"aggregations._count.min\": 3.0,\n \"aggregations._count.sum\": 3.0,\n \"aggregations.avg_price.avg\": 1133.3333333333333,\n \"aggregations.avg_price.count\": 1,\n \"aggregations.avg_price.max\": 1133.3333333333333,\n \"aggregations.avg_price.min\": 1133.3333333333333,\n \"aggregations.avg_price.sum\": 1133.3333333333333,\n \"aggregations.max_price.avg\": 1750.0,\n \"aggregations.max_price.count\": 1,\n \"aggregations.max_price.max\": 1750.0,\n \"aggregations.max_price.min\": 1750.0,\n \"aggregations.max_price.sum\": 1750.0,\n \"aggregations.min_price.avg\": 0.0,\n \"aggregations.min_price.count\": 1,\n \"aggregations.min_price.max\": 0.0,\n \"aggregations.min_price.min\": 0.0,\n \"aggregations.min_price.sum\": 0.0,\n \"hits.max_score\": 0.0,\n \"hits.total.relation\": \"eq\",\n \"hits.total.value\": 3,\n \"timed_out\": false,\n \"took\": 2\n },\n \"type\": 3\n }\n ]\n }\n}" + } } } } @@ -57749,6 +58710,12 @@ "name", "encoded" ] + }, + "examples": { + "SecurityCreateApiKeyResponseExample1": { + "description": "A successful response from `POST /_security/api_key`.", + "value": "{\n \"id\": \"VuaCfGcBCdbkQm-e5aOx\", \n \"name\": \"my-api-key\",\n \"expiration\": 1544068612110, \n \"api_key\": \"ui2lp2axTNmsyakw9tvNnw\", \n \"encoded\": \"VnVhQ2ZHY0JDZGJrUW0tZTVhT3g6dWkybHAyYXhUTm1zeWFrdzl0dk5udw==\" \n}" + } } } } @@ -57762,6 +58729,12 @@ "additionalProperties": { "$ref": 
"#/components/schemas/security.get_role:Role" } + }, + "examples": { + "SecurityGetRoleResponseExample1": { + "description": "A successful response from `GET /_security/role/my_admin_role`. The response contains information about the `my_admin_role` role in the native realm.\n", + "value": "{\n \"my_admin_role\": {\n \"description\": \"Grants full access to all management features within the cluster.\",\n \"cluster\" : [ \"all\" ],\n \"indices\" : [\n {\n \"names\" : [ \"index1\", \"index2\" ],\n \"privileges\" : [ \"all\" ],\n \"allow_restricted_indices\" : false,\n \"field_security\" : {\n \"grant\" : [ \"title\", \"body\" ]}\n }\n ],\n \"applications\" : [ ],\n \"run_as\" : [ \"other_user\" ],\n \"metadata\" : {\n \"version\" : 1\n },\n \"transient_metadata\": {\n \"enabled\": true\n }\n }\n}" + } } } } @@ -57802,6 +58775,12 @@ "index", "username" ] + }, + "examples": { + "SecurityHasPrivilegesResponseExample1": { + "description": "A successful response from `GET /_security/user/_has_privileges`, which lists the privileges for the `rdeniro` user.", + "value": "{\n \"username\": \"rdeniro\",\n \"has_all_requested\" : false,\n \"cluster\" : {\n \"monitor\" : true,\n \"manage\" : false\n },\n \"index\" : {\n \"suppliers\" : {\n \"read\" : true\n },\n \"products\" : {\n \"read\" : true\n },\n \"inventory\" : {\n \"read\" : true,\n \"write\" : false\n }\n },\n \"application\" : {\n \"inventory_manager\" : {\n \"product/1852563\" : {\n \"read\": false,\n \"data:write/inventory\": false\n }\n }\n }\n}" + } } } } @@ -57820,6 +58799,12 @@ "required": [ "role" ] + }, + "examples": { + "SecurityPutRoleResponseExample1": { + "description": "A successful response from `POST /_security/role/my_admin_role`.", + "value": "{\n \"role\": {\n \"created\": true \n }\n}" + } } } } @@ -57859,6 +58844,23 @@ "count", "api_keys" ] + }, + "examples": { + "QueryApiKeysResponseExample1": { + "summary": "Query API keys by ID", + "description": "A successful response from `GET /_security/_query/api_key?with_limited_by=true`. The `limited_by` details are the owner user's permissions associated with the API key. It is a point-in-time snapshot captured at creation and subsequent updates. An API key's effective permissions are an intersection of its assigned privileges and the owner user's permissions.\n", + "value": "{\n \"api_keys\": [\n {\n \"id\": \"VuaCfGcBCdbkQm-e5aOx\",\n \"name\": \"application-key-1\",\n \"creation\": 1548550550158,\n \"expiration\": 1548551550158,\n \"invalidated\": false,\n \"username\": \"myuser\",\n \"realm\": \"native1\",\n \"realm_type\": \"native\",\n \"metadata\": {\n \"application\": \"my-application\"\n },\n \"role_descriptors\": { },\n \"limited_by\": [ \n {\n \"role-power-user\": {\n \"cluster\": [\n \"monitor\"\n ],\n \"indices\": [\n {\n \"names\": [\n \"*\"\n ],\n \"privileges\": [\n \"read\"\n ],\n \"allow_restricted_indices\": false\n }\n ],\n \"applications\": [ ],\n \"run_as\": [ ],\n \"metadata\": { },\n \"transient_metadata\": {\n \"enabled\": true\n }\n }\n }\n ]\n }\n ]\n}" + }, + "QueryApiKeysResponseExample2": { + "summary": "Query API keys with pagination", + "description": "An abbreviated response from `GET /_security/_query/api_key` that contains a list of matched API keys along with their sort values. The first sort value is creation time, which is displayed in `date_time` format. 
The second sort value is the API key name.\n", + "value": "{\n \"total\": 100,\n \"count\": 10,\n \"api_keys\": [\n {\n \"id\": \"CLXgVnsBOGkf8IyjcXU7\",\n \"name\": \"app1-key-79\",\n \"creation\": 1629250154811,\n \"invalidated\": false,\n \"username\": \"org-admin-user\",\n \"realm\": \"native1\",\n \"metadata\": {\n \"environment\": \"production\"\n },\n \"role_descriptors\": { },\n \"_sort\": [\n \"2021-08-18T01:29:14.811Z\", \n \"app1-key-79\" \n ]\n },\n {\n \"id\": \"BrXgVnsBOGkf8IyjbXVB\",\n \"name\": \"app1-key-78\",\n \"creation\": 1629250153794,\n \"invalidated\": false,\n \"username\": \"org-admin-user\",\n \"realm\": \"native1\",\n \"metadata\": {\n \"environment\": \"production\"\n },\n \"role_descriptors\": { },\n \"_sort\": [\n \"2021-08-18T01:29:13.794Z\",\n \"app1-key-78\"\n ]\n }\n ]\n}" + }, + "QueryApiKeysResponseExample3": { + "summary": "Query all API keys", + "description": "A successful response from `GET /_security/_query/api_key`. It includes the role descriptors that are assigned to each API key when it was created or last updated. Note that an API key's effective permissions are an intersection of its assigned privileges and the point-in-time snapshot of the owner user's permissions. An empty role descriptors object means the API key inherits the owner user's permissions.\n", + "value": "{\n \"total\": 3,\n \"count\": 3,\n \"api_keys\": [ \n {\n \"id\": \"nkvrGXsB8w290t56q3Rg\",\n \"name\": \"my-api-key-1\",\n \"creation\": 1628227480421,\n \"expiration\": 1629091480421,\n \"invalidated\": false,\n \"username\": \"elastic\",\n \"realm\": \"reserved\",\n \"realm_type\": \"reserved\",\n \"metadata\": {\n \"letter\": \"a\"\n },\n \"role_descriptors\": { \n \"role-a\": {\n \"cluster\": [\n \"monitor\"\n ],\n \"indices\": [\n {\n \"names\": [\n \"index-a\"\n ],\n \"privileges\": [\n \"read\"\n ],\n \"allow_restricted_indices\": false\n }\n ],\n \"applications\": [ ],\n \"run_as\": [ ],\n \"metadata\": { },\n \"transient_metadata\": {\n \"enabled\": true\n }\n }\n }\n },\n {\n \"id\": \"oEvrGXsB8w290t5683TI\",\n \"name\": \"my-api-key-2\",\n \"creation\": 1628227498953,\n \"expiration\": 1628313898953,\n \"invalidated\": false,\n \"username\": \"elastic\",\n \"realm\": \"reserved\",\n \"metadata\": {\n \"letter\": \"b\"\n },\n \"role_descriptors\": { } \n }\n ]\n}" + } } } } @@ -57891,6 +58893,18 @@ "count", "roles" ] + }, + "examples": { + "QueryRolesResponseExample1": { + "summary": "Query roles by name", + "description": "A successful response from `POST /_security/_query/role`. 
It returns a JSON structure that contains the information retrieved for one or more roles.\n", + "value": "{\n \"total\": 2,\n \"count\": 2,\n \"roles\": [ \n {\n \"name\" : \"my_admin_role\",\n \"cluster\" : [\n \"all\"\n ],\n \"indices\" : [\n {\n \"names\" : [\n \"index1\",\n \"index2\"\n ],\n \"privileges\" : [\n \"all\"\n ],\n \"field_security\" : {\n \"grant\" : [\n \"title\",\n \"body\"\n ]\n },\n \"allow_restricted_indices\" : false\n }\n ],\n \"applications\" : [ ],\n \"run_as\" : [\n \"other_user\"\n ],\n \"metadata\" : {\n \"version\" : 1\n },\n \"transient_metadata\" : {\n \"enabled\" : true\n },\n \"description\" : \"Grants full access to all management features within the cluster.\",\n \"_sort\" : [\n \"my_admin_role\"\n ]\n },\n {\n \"name\" : \"my_user_role\",\n \"cluster\" : [ ],\n \"indices\" : [\n {\n \"names\" : [\n \"index1\",\n \"index2\"\n ],\n \"privileges\" : [\n \"all\"\n ],\n \"field_security\" : {\n \"grant\" : [\n \"title\",\n \"body\"\n ]\n },\n \"allow_restricted_indices\" : false\n }\n ],\n \"applications\" : [ ],\n \"run_as\" : [ ],\n \"metadata\" : {\n \"version\" : 1\n },\n \"transient_metadata\" : {\n \"enabled\" : true\n },\n \"description\" : \"Grants user access to some indices.\",\n \"_sort\" : [\n \"my_user_role\"\n ]\n }\n ]\n}" + }, + "QueryRolesResponseExample2": { + "summary": "Query roles by description", + "description": "A successful response from `POST /_security/_query/role`.\n", + "value": "{\n \"total\": 2,\n \"count\": 1,\n \"roles\": [\n {\n \"name\" : \"my_user_role\",\n \"cluster\" : [ ],\n \"indices\" : [\n {\n \"names\" : [\n \"index1\",\n \"index2\"\n ],\n \"privileges\" : [\n \"all\"\n ],\n \"field_security\" : {\n \"grant\" : [\n \"title\",\n \"body\"\n ]\n },\n \"allow_restricted_indices\" : false\n }\n ],\n \"applications\" : [ ],\n \"run_as\" : [ ],\n \"metadata\" : {\n \"version\" : 1\n },\n \"transient_metadata\" : {\n \"enabled\" : true\n },\n \"description\" : \"Grants user access to some indices.\"\n }\n ]\n}" + } } } } @@ -58001,6 +59015,12 @@ "terms", "complete" ] + }, + "examples": { + "TermsEnumResponseExample1": { + "description": "A successful response from `POST stackoverflow/_terms_enum`.", + "value": "{\n \"_shards\": {\n \"total\": 1,\n \"successful\": 1,\n \"failed\": 0\n },\n \"terms\": [\n \"kibana\"\n ],\n \"complete\" : true\n}" + } } } } @@ -58040,6 +59060,23 @@ "took", "_version" ] + }, + "examples": { + "TermVectorsResponseExample1": { + "summary": "Return stored term vectors", + "description": "A successful response from `GET /my-index-000001/_termvectors/1`.", + "value": "{\n \"_index\": \"my-index-000001\",\n \"_id\": \"1\",\n \"_version\": 1,\n \"found\": true,\n \"took\": 6,\n \"term_vectors\": {\n \"text\": {\n \"field_statistics\": {\n \"sum_doc_freq\": 4,\n \"doc_count\": 2,\n \"sum_ttf\": 6\n },\n \"terms\": {\n \"test\": {\n \"doc_freq\": 2,\n \"ttf\": 4,\n \"term_freq\": 3,\n \"tokens\": [\n {\n \"position\": 0,\n \"start_offset\": 0,\n \"end_offset\": 4,\n \"payload\": \"d29yZA==\"\n },\n {\n \"position\": 1,\n \"start_offset\": 5,\n \"end_offset\": 9,\n \"payload\": \"d29yZA==\"\n },\n {\n \"position\": 2,\n \"start_offset\": 10,\n \"end_offset\": 14,\n \"payload\": \"d29yZA==\"\n }\n ]\n }\n }\n }\n }\n}" + }, + "TermVectorsResponseExample2": { + "summary": "Per-field analyzer", + "description": "A successful response from `GET /my-index-000001/_termvectors` with `per_field_analyzer` in the request body.", + "value": "{\n \"_index\": \"my-index-000001\",\n \"_version\": 0,\n \"found\": 
true,\n \"took\": 6,\n \"term_vectors\": {\n \"fullname\": {\n \"field_statistics\": {\n \"sum_doc_freq\": 2,\n \"doc_count\": 4,\n \"sum_ttf\": 4\n },\n \"terms\": {\n \"John Doe\": {\n \"term_freq\": 1,\n \"tokens\": [\n {\n \"position\": 0,\n \"start_offset\": 0,\n \"end_offset\": 8\n }\n ]\n }\n }\n }\n }\n}" + }, + "TermVectorsResponseExample3": { + "summary": "Terms filtering", + "description": "A successful response from `GET /my-index-000001/_termvectors` with a `filter` in the request body.", + "value": "{\n \"_index\": \"imdb\",\n \"_version\": 0,\n \"found\": true,\n \"term_vectors\": {\n \"plot\": {\n \"field_statistics\": {\n \"sum_doc_freq\": 3384269,\n \"doc_count\": 176214,\n \"sum_ttf\": 3753460\n },\n \"terms\": {\n \"armored\": {\n \"doc_freq\": 27,\n \"ttf\": 27,\n \"term_freq\": 1,\n \"score\": 9.74725\n },\n \"industrialist\": {\n \"doc_freq\": 88,\n \"ttf\": 88,\n \"term_freq\": 1,\n \"score\": 8.590818\n },\n \"stark\": {\n \"doc_freq\": 44,\n \"ttf\": 47,\n \"term_freq\": 1,\n \"score\": 9.272792\n }\n }\n }\n }\n}" + } } } } @@ -58065,6 +59102,12 @@ "count", "transforms" ] + }, + "examples": { + "GetTransformResponseExample1": { + "description": "A successful response that contains configuration information for a transform.", + "value": "{\n \"count\": 1,\n \"transforms\": [\n {\n \"id\": \"ecommerce_transform1\",\n \"authorization\": {\n \"roles\": [\n \"superuser\"\n ]\n },\n \"version\": \"8.4.0\",\n \"create_time\": 1656023416565,\n \"source\": {\n \"index\": [\n \"kibana_sample_data_ecommerce\"\n ],\n \"query\": {\n \"term\": {\n \"geoip.continent_name\": {\n \"value\": \"Asia\"\n }\n }\n }\n },\n \"dest\": {\n \"index\": \"kibana_sample_data_ecommerce_transform1\",\n \"pipeline\": \"add_timestamp_pipeline\"\n },\n \"frequency\": \"5m\",\n \"sync\": {\n \"time\": {\n \"field\": \"order_date\",\n \"delay\": \"60s\"\n }\n },\n \"pivot\": {\n \"group_by\": {\n \"customer_id\": {\n \"terms\": {\n \"field\": \"customer_id\"\n }\n }\n },\n \"aggregations\": {\n \"max_price\": {\n \"max\": {\n \"field\": \"taxful_total_price\"\n }\n }\n }\n },\n \"description\": \"Maximum priced ecommerce data by customer_id in Asia\",\n \"settings\": {},\n \"retention_policy\": {\n \"time\": {\n \"field\": \"order_date\",\n \"max_age\": \"30d\"\n }\n }\n }\n ]\n}" + } } } } @@ -58090,6 +59133,12 @@ "generated_dest_index", "preview" ] + }, + "examples": { + "PreviewTransformResponseExample1": { + "description": "An abbreviated response from `POST _transform/_preview` that contains a preview a transform that uses the pivot method.", + "value": "{\n \"preview\": [\n {\n \"max_price\": 171,\n \"customer_id\": \"10\"\n },\n {\n \"max_price\": 233,\n \"customer_id\": \"11\"\n },\n {\n \"max_price\": 200,\n \"customer_id\": \"12\"\n },\n {\n \"max_price\": 301,\n \"customer_id\": \"13\"\n },\n {\n \"max_price\": 176,\n \"customer_id\": \"14\"\n },\n {\n \"max_price\": 2250,\n \"customer_id\": \"15\"\n },\n {\n \"max_price\": 170,\n \"customer_id\": \"16\"\n },\n {\n \"max_price\": 243,\n \"customer_id\": \"17\"\n },\n {\n \"max_price\": 154,\n \"customer_id\": \"18\"\n },\n {\n \"max_price\": 393,\n \"customer_id\": \"19\"\n },\n {\n \"max_price\": 165,\n \"customer_id\": \"20\"\n },\n {\n \"max_price\": 115,\n \"customer_id\": \"21\"\n },\n {\n \"max_price\": 192,\n \"customer_id\": \"22\"\n },\n {\n \"max_price\": 169,\n \"customer_id\": \"23\"\n },\n {\n \"max_price\": 230,\n \"customer_id\": \"24\"\n },\n {\n \"max_price\": 278,\n \"customer_id\": \"25\"\n },\n {\n \"max_price\": 
200,\n \"customer_id\": \"26\"\n },\n {\n \"max_price\": 344,\n \"customer_id\": \"27\"\n },\n {\n \"max_price\": 175,\n \"customer_id\": \"28\"\n },\n {\n \"max_price\": 177,\n \"customer_id\": \"29\"\n },\n {\n \"max_price\": 190,\n \"customer_id\": \"30\"\n },\n {\n \"max_price\": 190,\n \"customer_id\": \"31\"\n },\n {\n \"max_price\": 205,\n \"customer_id\": \"32\"\n },\n {\n \"max_price\": 215,\n \"customer_id\": \"33\"\n },\n {\n \"max_price\": 270,\n \"customer_id\": \"34\"\n },\n {\n \"max_price\": 184,\n \"customer_id\": \"36\"\n },\n {\n \"max_price\": 222,\n \"customer_id\": \"37\"\n },\n {\n \"max_price\": 370,\n \"customer_id\": \"38\"\n },\n {\n \"max_price\": 240,\n \"customer_id\": \"39\"\n },\n {\n \"max_price\": 230,\n \"customer_id\": \"4\"\n },\n {\n \"max_price\": 229,\n \"customer_id\": \"41\"\n },\n {\n \"max_price\": 190,\n \"customer_id\": \"42\"\n },\n {\n \"max_price\": 150,\n \"customer_id\": \"43\"\n },\n {\n \"max_price\": 175,\n \"customer_id\": \"44\"\n },\n {\n \"max_price\": 190,\n \"customer_id\": \"45\"\n },\n {\n \"max_price\": 150,\n \"customer_id\": \"46\"\n },\n {\n \"max_price\": 310,\n \"customer_id\": \"48\"\n },\n {\n \"max_price\": 223,\n \"customer_id\": \"49\"\n },\n {\n \"max_price\": 283,\n \"customer_id\": \"5\"\n },\n {\n \"max_price\": 185,\n \"customer_id\": \"50\"\n },\n {\n \"max_price\": 190,\n \"customer_id\": \"51\"\n },\n {\n \"max_price\": 333,\n \"customer_id\": \"52\"\n },\n {\n \"max_price\": 165,\n \"customer_id\": \"6\"\n },\n {\n \"max_price\": 144,\n \"customer_id\": \"7\"\n },\n {\n \"max_price\": 198,\n \"customer_id\": \"8\"\n },\n {\n \"max_price\": 210,\n \"customer_id\": \"9\"\n }\n ],\n \"generated_dest_index\": {\n \"mappings\": {\n \"_meta\": {\n \"_transform\": {\n \"transform\": \"transform-preview\",\n \"version\": {\n \"created\": \"10.0.0\"\n },\n \"creation_date_in_millis\": 1712948905889\n },\n \"created_by\": \"transform\"\n },\n \"properties\": {\n \"max_price\": {\n \"type\": \"half_float\"\n },\n \"customer_id\": {\n \"type\": \"keyword\"\n }\n }\n },\n \"settings\": {\n \"index\": {\n \"number_of_shards\": \"1\",\n \"auto_expand_replicas\": \"0-1\"\n }\n },\n \"aliases\": {}\n }\n}" + } } } } @@ -63562,6 +64611,12 @@ } } } + }, + "examples": { + "AsyncSearchSubmitRequestExample1": { + "description": "Perform a search request asynchronously with `POST /sales*/_async_search?size=0`. 
It accepts the same parameters and request body as the search API.\n", + "value": "{\n \"sort\": [\n { \"date\": { \"order\": \"asc\" } }\n ],\n \"aggs\": {\n \"sale_date\": {\n \"date_histogram\": {\n \"field\": \"date\",\n \"calendar_interval\": \"1d\"\n }\n }\n }\n}" + } } } } @@ -63584,6 +64639,28 @@ } ] } + }, + "examples": { + "BulkRequestExample1": { + "summary": "Multiple operations", + "description": "Run `POST _bulk` to perform multiple operations.", + "value": "{ \"index\" : { \"_index\" : \"test\", \"_id\" : \"1\" } }\n{ \"field1\" : \"value1\" }\n{ \"delete\" : { \"_index\" : \"test\", \"_id\" : \"2\" } }\n{ \"create\" : { \"_index\" : \"test\", \"_id\" : \"3\" } }\n{ \"field1\" : \"value3\" }\n{ \"update\" : {\"_id\" : \"1\", \"_index\" : \"test\"} }\n{ \"doc\" : {\"field2\" : \"value2\"} }" + }, + "BulkRequestExample2": { + "summary": "Bulk updates", + "description": "When you run `POST _bulk` and use the `update` action, you can use `retry_on_conflict` as a field in the action itself (not in the extra payload line) to specify how many times an update should be retried in the case of a version conflict.\n", + "value": "{ \"update\" : {\"_id\" : \"1\", \"_index\" : \"index1\", \"retry_on_conflict\" : 3} }\n{ \"doc\" : {\"field\" : \"value\"} }\n{ \"update\" : { \"_id\" : \"0\", \"_index\" : \"index1\", \"retry_on_conflict\" : 3} }\n{ \"script\" : { \"source\": \"ctx._source.counter += params.param1\", \"lang\" : \"painless\", \"params\" : {\"param1\" : 1}}, \"upsert\" : {\"counter\" : 1}}\n{ \"update\" : {\"_id\" : \"2\", \"_index\" : \"index1\", \"retry_on_conflict\" : 3} }\n{ \"doc\" : {\"field\" : \"value\"}, \"doc_as_upsert\" : true }\n{ \"update\" : {\"_id\" : \"3\", \"_index\" : \"index1\", \"_source\" : true} }\n{ \"doc\" : {\"field\" : \"value\"} }\n{ \"update\" : {\"_id\" : \"4\", \"_index\" : \"index1\"} }\n{ \"doc\" : {\"field\" : \"value\"}, \"_source\": true}" + }, + "BulkRequestExample3": { + "summary": "Filter for failed operations", + "description": "To return only information about failed operations, run `POST /_bulk?filter_path=items.*.error`.\n", + "value": "{ \"update\": {\"_id\": \"5\", \"_index\": \"index1\"} }\n{ \"doc\": {\"my_field\": \"foo\"} }\n{ \"update\": {\"_id\": \"6\", \"_index\": \"index1\"} }\n{ \"doc\": {\"my_field\": \"foo\"} }\n{ \"create\": {\"_id\": \"7\", \"_index\": \"index1\"} }\n{ \"my_field\": \"foo\" }" + }, + "BulkRequestExample4": { + "summary": "Dynamic templates", + "description": "Run `POST /_bulk` to perform a bulk request that consists of index and create actions with the `dynamic_templates` parameter. The bulk request creates two new fields `work_location` and `home_location` with type `geo_point` according to the `dynamic_templates` parameter. 
However, the `raw_location` field is created using default dynamic mapping rules, as a `text` field in that case, since it is supplied as a string in the JSON document.\n", + "value": "{ \"index\" : { \"_index\" : \"my_index\", \"_id\" : \"1\", \"dynamic_templates\": {\"work_location\": \"geo_point\"}} }\n{ \"field\" : \"value1\", \"work_location\": \"41.12,-71.34\", \"raw_location\": \"41.12,-71.34\"}\n{ \"create\" : { \"_index\" : \"my_index\", \"_id\" : \"2\", \"dynamic_templates\": {\"home_location\": \"geo_point\"}} }\n{ \"field\" : \"value2\", \"home_location\": \"41.12,-71.34\"}" + } } } }, @@ -63599,6 +64676,12 @@ "$ref": "#/components/schemas/_types:ScrollIds" } } + }, + "examples": { + "ClearScrollRequestExample1": { + "description": "Run `DELETE /_search/scroll` to clear the search context and results for a scrolling search.", + "value": "{\n \"scroll_id\": \"DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==\"\n}" + } } } } @@ -63626,6 +64709,17 @@ "required": [ "template" ] + }, + "examples": { + "ClusterPutComponentTemplateRequestExample1": { + "summary": "Create a template", + "value": "{\n \"template\": {\n \"settings\": {\n \"number_of_shards\": 1\n },\n \"mappings\": {\n \"_source\": {\n \"enabled\": false\n },\n \"properties\": {\n \"host_name\": {\n \"type\": \"keyword\"\n },\n \"created_at\": {\n \"type\": \"date\",\n \"format\": \"EEE MMM dd HH:mm:ss Z yyyy\"\n }\n }\n }\n }\n}" + }, + "ClusterPutComponentTemplateRequestExample2": { + "summary": "Create a template with aliases", + "description": "You can include index aliases in a component template. During index creation, the `{index}` placeholder in the alias name will be replaced with the actual index name that the template gets applied to.\n", + "value": "{\n \"template\": {\n \"settings\": {\n \"number_of_shards\": 1\n },\n \"aliases\": {\n \"alias1\": {},\n \"alias2\": {\n \"filter\": {\n \"term\": {\n \"user.id\": \"kimchy\"\n }\n },\n \"routing\": \"shard-1\"\n },\n \"{index}-alias\": {}\n }\n }\n}" + } } } }, @@ -63656,6 +64750,14 @@ "type": "string" } } + }, + "examples": { + "ConnectorPutRequestExample1": { + "value": "{\n \"index_name\": \"search-google-drive\",\n \"name\": \"My Connector\",\n \"service_type\": \"google_drive\"\n}" + }, + "ConnectorPutRequestExample2": { + "value": "{\n \"index_name\": \"search-google-drive\",\n \"name\": \"My Connector\",\n \"description\": \"My Connector to sync data to Elastic index from Google Drive\",\n \"service_type\": \"google_drive\",\n \"language\": \"english\"\n}" + } } } } @@ -63670,6 +64772,12 @@ "$ref": "#/components/schemas/_types.query_dsl:QueryContainer" } } + }, + "examples": { + "CountRequestExample1": { + "description": "Run `GET /my-index-000001/_count?q=user:kimchy`. Alternatively, run `GET /my-index-000001/_count` with the same query in the request body. 
Both requests count the number of documents in `my-index-000001` with a `user.id` of `kimchy`.\n", + "value": "{\n \"query\" : {\n \"term\" : { \"user.id\" : \"kimchy\" }\n }\n}" + } } } } @@ -63679,6 +64787,12 @@ "application/json": { "schema": { "type": "object" + }, + "examples": { + "CreateRequestExample1": { + "description": "Run `PUT my-index-000001/_create/1` to index a document into the `my-index-000001` index if no document with that ID exists.\n", + "value": "{\n \"@timestamp\": \"2099-11-15T13:12:00\",\n \"message\": \"GET /search HTTP/1.1 200 1070000\",\n \"user\": {\n \"id\": \"kimchy\"\n }\n}" + } } } }, @@ -63771,6 +64885,18 @@ "required": [ "query" ] + }, + "examples": { + "EqlSearchRequestExample1": { + "summary": "Basic query", + "description": "Run `GET /my-data-stream/_eql/search` to search for events that have a `process.name` of `cmd.exe` and a `process.pid` other than `2013`.\n", + "value": "{\n \"query\": \"\"\"\n process where (process.name == \"cmd.exe\" and process.pid != 2013)\n \"\"\"\n}" + }, + "EqlSearchRequestExample2": { + "summary": "Sequence query", + "description": "Run `GET /my-data-stream/_eql/search` to search for a sequence of events. The sequence starts with an event with an `event.category` of `file`, a `file.name` of `cmd.exe`, and a `process.pid` other than `2013`. It is followed by an event with an `event.category` of `process` and a `process.executable` that contains the substring `regsvr32`. These events must also share the same `process.pid` value.\n", + "value": "{\n \"query\": \"\"\"\n sequence by process.pid\n [ file where file.name == \"cmd.exe\" and process.pid != 2013 ]\n [ process where stringContains(process.executable, \"regsvr32\") ]\n \"\"\"\n}" + } } } }, @@ -63786,6 +64912,12 @@ "$ref": "#/components/schemas/_types.query_dsl:QueryContainer" } } + }, + "examples": { + "ExplainRequestExample1": { + "description": "Run `GET /my-index-000001/_explain/0` with the request body. Alternatively, run `GET /my-index-000001/_explain/0?q=message:elasticsearch`.\n", + "value": "{\n \"query\" : {\n \"match\" : { \"message\" : \"elasticsearch\" }\n }\n}" + } } } } @@ -63806,6 +64938,12 @@ "$ref": "#/components/schemas/_types.mapping:RuntimeFields" } } + }, + "examples": { + "FieldCapabilitiesRequestExample1": { + "description": "Run `POST my-index-*/_field_caps?fields=rating` to get field capabilities and filter indices with a query. Indices that rewrite the provided filter to `match_none` on every shard will be filtered from the response.\n", + "value": "{\n \"index_filter\": {\n \"range\": {\n \"@timestamp\": {\n \"gte\": \"2018\"\n }\n }\n }\n}" + } } } } @@ -63833,6 +64971,12 @@ } } } + }, + "examples": { + "GraphExploreRequestExample1": { + "description": "Run `POST clicklogs/_graph/explore` for a basic exploration. An initial graph explore query typically begins with a query to identify strongly related terms. Seed the exploration with a query. This example is searching `clicklogs` for people who searched for the term `midi`. Identify the vertices to include in the graph. This example is looking for product codes that are significantly associated with searches for `midi`. Find the connections. 
This example is looking for other search terms that led people to click on the products that are associated with searches for `midi`.\n", + "value": "{\n \"query\": {\n \"match\": {\n \"query.raw\": \"midi\"\n }\n },\n \"vertices\": [\n {\n \"field\": \"product\"\n }\n ],\n \"connections\": {\n \"vertices\": [\n {\n \"field\": \"query.raw\"\n }\n ]\n }\n}" + } } } } @@ -63842,6 +64986,18 @@ "application/json": { "schema": { "type": "object" + }, + "examples": { + "IndexRequestExample1": { + "summary": "Automate document IDs", + "description": "Run `POST my-index-000001/_doc/` to index a document. When you use the `POST /<target>/_doc/` request format, the `op_type` is automatically set to `create` and the index operation generates a unique ID for the document.\n", + "value": "{\n \"@timestamp\": \"2099-11-15T13:12:00\",\n \"message\": \"GET /search HTTP/1.1 200 1070000\",\n \"user\": {\n \"id\": \"kimchy\"\n }\n}" + }, + "IndexRequestExample2": { + "summary": "Define document IDs", + "description": "Run `PUT my-index-000001/_doc/1` to insert a JSON document into the `my-index-000001` index with an `_id` of 1.\n", + "value": "{\n \"@timestamp\": \"2099-11-15T13:12:00\",\n \"message\": \"GET /search HTTP/1.1 200 1070000\",\n \"user\": {\n \"id\": \"kimchy\"\n }\n}" + } } } }, @@ -63896,6 +65052,43 @@ "$ref": "#/components/schemas/_types.analysis:Tokenizer" } } + }, + "examples": { + "indicesAnalyzeRequestExample1": { + "summary": "No index specified", + "description": "You can apply any of the built-in analyzers to the text string without specifying an index.", + "value": "{\n \"analyzer\": \"standard\",\n \"text\": \"this is a test\"\n}" + }, + "indicesAnalyzeRequestExample2": { + "summary": "An array of text strings", + "description": "If the text parameter is provided as an array of strings, it is analyzed as a multi-value field.", + "value": "{\n \"analyzer\": \"standard\",\n \"text\": [\n \"this is a test\",\n \"the second text\"\n ]\n}" + }, + "indicesAnalyzeRequestExample3": { + "summary": "Custom analyzer example 1", + "description": "You can test a custom transient analyzer built from tokenizers, token filters, and char filters. Token filters use the filter parameter.", + "value": "{\n \"tokenizer\": \"keyword\",\n \"filter\": [\n \"lowercase\"\n ],\n \"char_filter\": [\n \"html_strip\"\n ],\n \"text\": \"this is a test\"\n}" + }, + "indicesAnalyzeRequestExample4": { + "summary": "Custom analyzer example 2", + "description": "Custom tokenizers, token filters, and character filters can be specified in the request body.", + "value": "{\n \"tokenizer\": \"whitespace\",\n \"filter\": [\n \"lowercase\",\n {\n \"type\": \"stop\",\n \"stopwords\": [\n \"a\",\n \"is\",\n \"this\"\n ]\n }\n ],\n \"text\": \"this is a test\"\n}" + }, + "indicesAnalyzeRequestExample5": { + "summary": "Derive analyzer from field mapping", + "description": "Run `GET /analyze_sample/_analyze` to run an analysis on the text using the default index analyzer associated with the `analyze_sample` index. 
Alternatively, the analyzer can be derived based on a field mapping.", + "value": "{\n \"field\": \"obj1.field1\",\n \"text\": \"this is a test\"\n}" + }, + "indicesAnalyzeRequestExample6": { + "summary": "Normalizer", + "description": "Run `GET /analyze_sample/_analyze` and supply a normalizer for a keyword field if there is a normalizer associated with the specified index.", + "value": "{\n \"normalizer\": \"my_normalizer\",\n \"text\": \"BaR\"\n}" + }, + "indicesAnalyzeRequestExample7": { + "summary": "Explain analysis", + "description": "If you want to get more advanced details, set `explain` to `true`. It will output all token attributes for each token. You can filter token attributes you want to output by setting the `attributes` option. NOTE: The format of the additional detail information is labelled as experimental in Lucene and it may change in the future.\n", + "value": "{\n \"tokenizer\": \"standard\",\n \"filter\": [\n \"snowball\"\n ],\n \"text\": \"detailed output\",\n \"explain\": true,\n \"attributes\": [\n \"keyword\"\n ]\n}" + } } } } @@ -63923,6 +65116,11 @@ "$ref": "#/components/schemas/_types:Routing" } } + }, + "examples": { + "indicesPutAliasRequestExample1": { + "value": "{\n \"actions\": [\n {\n \"add\": {\n \"index\": \"my-data-stream\",\n \"alias\": \"my-alias\"\n }\n }\n ]\n}" + } } } } @@ -63975,6 +65173,17 @@ "type": "boolean" } } + }, + "examples": { + "IndicesPutIndexTemplateRequestExample1": { + "summary": "Create a template", + "value": "{\n \"index_patterns\" : [\"template*\"],\n \"priority\" : 1,\n \"template\": {\n \"settings\" : {\n \"number_of_shards\" : 2\n }\n }\n}" + }, + "IndicesPutIndexTemplateRequestExample2": { + "summary": "Create a template with aliases", + "description": "You can include index aliases in an index template.\nDuring index creation, the `{index}` placeholder in the alias name will be replaced with the actual index name that the template gets applied to.\n", + "value": "{\n \"index_patterns\": [\n \"template*\"\n ],\n \"template\": {\n \"settings\": {\n \"number_of_shards\": 1\n },\n \"aliases\": {\n \"alias1\": {},\n \"alias2\": {\n \"filter\": {\n \"term\": {\n \"user.id\": \"kimchy\"\n }\n },\n \"routing\": \"shard-1\"\n },\n \"{index}-alias\": {}\n }\n }\n}" + } } } }, @@ -64047,6 +65256,13 @@ "$ref": "#/components/schemas/_types.mapping:RuntimeFields" } } + }, + "examples": { + "indicesPutMappingRequestExample1": { + "summary": "Update multiple targets", + "description": "The update mapping API can be applied to multiple data streams or indices with a single request. 
For example, run `PUT /my-index-000001,my-index-000002/_mapping` to update mappings for the `my-index-000001` and `my-index-000002` indices at the same time.\n", + "value": "{\n \"properties\": {\n \"user\": {\n \"properties\": {\n \"name\": {\n \"type\": \"keyword\"\n }\n }\n }\n }\n}" + } } } }, @@ -64057,6 +65273,22 @@ "application/json": { "schema": { "$ref": "#/components/schemas/indices._types:IndexSettings" + }, + "examples": { + "IndicesPutSettingsRequestExample1": { + "summary": "Change a dynamic index setting", + "value": "{\n \"index\" : {\n \"number_of_replicas\" : 2\n }\n}" + }, + "indicesPutSettingsRequestExample2": { + "summary": "Reset an index setting", + "description": "To revert a setting to the default value, use `null`.", + "value": "{\n \"index\" : {\n \"refresh_interval\" : null\n }\n}" + }, + "indicesPutSettingsRequestExample3": { + "summary": "Update index analysis", + "description": "To add an analyzer, you must close the index, define the analyzer, then reopen the index.", + "value": "{\n \"analysis\" : {\n \"analyzer\":{\n \"content\":{\n \"type\":\"custom\",\n \"tokenizer\":\"whitespace\"\n }\n }\n }\n}" + } } } }, @@ -64089,6 +65321,12 @@ } } } + }, + "examples": { + "indicesRolloverRequestExample1": { + "summary": "Create a new index for a data stream.", + "value": "{\n \"conditions\": {\n \"max_age\": \"7d\",\n \"max_docs\": 1000,\n \"max_primary_shard_size\": \"50gb\",\n \"max_primary_shard_docs\": \"2000\"\n }\n}" + } } } } @@ -64141,6 +65379,12 @@ "type": "boolean" } } + }, + "examples": { + "indicesSimulateTemplateRequestExample1": { + "description": "To see what settings will be applied by a template before you add it to the cluster, you can pass a template configuration in the request body. 
The specified template is used for the simulation if it has a higher priority than existing templates.\n", + "value": "{\n \"index_patterns\": [\"my-index-*\"],\n \"composed_of\": [\"ct2\"],\n \"priority\": 10,\n \"template\": {\n \"settings\": {\n \"index.number_of_replicas\": 1\n }\n }\n}" + } } } } @@ -64190,6 +65434,28 @@ "required": [ "input" ] + }, + "examples": { + "InferenceRequestExample1": { + "summary": "Completion task", + "description": "Run `POST _inference/completion/openai_chat_completions` to perform a completion on the example question.", + "value": "{\n \"input\": \"What is Elastic?\"\n}" + }, + "InferenceRequestExample2": { + "summary": "Rerank task", + "description": "Run `POST _inference/rerank/cohere_rerank` to perform reranking on the example input.", + "value": "{\n \"input\": [\"luke\", \"like\", \"leia\", \"chewy\",\"r2d2\", \"star\", \"wars\"],\n \"query\": \"star wars main character\"\n}" + }, + "InferenceRequestExample3": { + "summary": "Sparse embedding task", + "description": "Run `POST _inference/sparse_embedding/my-elser-model` to perform sparse embedding on the example sentence.", + "value": "{\n \"input\": \"The sky above the port was the color of television tuned to a dead channel.\"\n}" + }, + "InferenceRequestExample4": { + "summary": "Text embedding task", + "description": "Run `POST _inference/text_embedding/my-cohere-endpoint` to perform text embedding on the example sentence using the Cohere integration.", + "value": "{\n \"input\": \"The sky above the port was the color of television tuned to a dead channel.\",\n \"task_settings\": {\n \"input_type\": \"ingest\"\n }\n}" + } } } } @@ -64278,6 +65544,13 @@ "required": [ "docs" ] + }, + "examples": { + "SimulatePipelineRequestExample1": { + "summary": "Run an ingest pipeline against a set of provided documents.", + "description": "You can specify the pipeline either in the request body or as a path parameter.", + "value": "{\n \"pipeline\" :\n {\n \"description\": \"_description\",\n \"processors\": [\n {\n \"set\" : {\n \"field\" : \"field2\",\n \"value\" : \"_value\"\n }\n }\n ]\n },\n \"docs\": [\n {\n \"_index\": \"index\",\n \"_id\": \"id\",\n \"_source\": {\n \"foo\": \"bar\"\n }\n },\n {\n \"_index\": \"index\",\n \"_id\": \"id\",\n \"_source\": {\n \"foo\": \"rab\"\n }\n }\n ]\n}" + } } } }, @@ -64300,6 +65573,28 @@ "$ref": "#/components/schemas/_types:Ids" } } + }, + "examples": { + "MultiGetRequestExample1": { + "summary": "Get documents by ID", + "description": "Run `GET /my-index-000001/_mget`. When you specify an index in the request URI, only the document IDs are required in the request body.\n", + "value": "{\n \"docs\": [\n {\n \"_id\": \"1\"\n },\n {\n \"_id\": \"2\"\n }\n ]\n}" + }, + "MultiGetRequestExample2": { + "summary": "Filter source fields", + "description": "Run `GET /_mget`. This request sets `_source` to `false` for document 1 to exclude the source entirely. It retrieves `field3` and `field4` from document 2. It retrieves the `user` field from document 3 but filters out the `user.location` field.\n", + "value": "{\n \"docs\": [\n {\n \"_index\": \"test\",\n \"_id\": \"1\",\n \"_source\": false\n },\n {\n \"_index\": \"test\",\n \"_id\": \"2\",\n \"_source\": [ \"field3\", \"field4\" ]\n },\n {\n \"_index\": \"test\",\n \"_id\": \"3\",\n \"_source\": {\n \"include\": [ \"user\" ],\n \"exclude\": [ \"user.location\" ]\n }\n }\n ]\n}" + }, + "MultiGetRequestExample3": { + "summary": "Get stored fields", + "description": "Run `GET /_mget`. 
This request retrieves `field1` and `field2` from document 1 and `field3` and `field4` from document 2.\n", + "value": "{\n \"docs\": [\n {\n \"_index\": \"test\",\n \"_id\": \"1\",\n \"stored_fields\": [ \"field1\", \"field2\" ]\n },\n {\n \"_index\": \"test\",\n \"_id\": \"2\",\n \"stored_fields\": [ \"field3\", \"field4\" ]\n }\n ]\n}" + }, + "MultiGetRequestExample4": { + "summary": "Document routing", + "description": "Run `GET /_mget?routing=key1`. If routing is used during indexing, you need to specify the routing value to retrieve documents. This request fetches `test/_doc/2` from the shard corresponding to routing key `key1`. It fetches `test/_doc/1` from the shard corresponding to routing key `key2`.\n", + "value": "{\n \"docs\": [\n {\n \"_index\": \"test\",\n \"_id\": \"1\",\n \"routing\": \"key2\"\n },\n {\n \"_index\": \"test\",\n \"_id\": \"2\"\n }\n ]\n}" + } } } }, @@ -64442,6 +65737,12 @@ "items": { "$ref": "#/components/schemas/_global.msearch_template:RequestItem" } + }, + "examples": { + "MultiSearchTemplateRequestExample1": { + "description": "Run `GET my-index/_msearch/template` to run multiple templated searches.", + "value": "{ }\n{ \"id\": \"my-search-template\", \"params\": { \"query_string\": \"hello world\", \"from\": 0, \"size\": 10 }}\n{ }\n{ \"id\": \"my-other-search-template\", \"params\": { \"query_type\": \"match_all\" }}" + } } } }, @@ -64468,6 +65769,23 @@ } } } + }, + "examples": { + "MultiTermVectorsRequestExample1": { + "summary": "Get multiple term vectors", + "description": "Run `POST /my-index-000001/_mtermvectors`. When you specify an index in the request URI, the index does not need to be specified for each document in the request body.\n", + "value": "{\n \"docs\": [\n {\n \"_id\": \"2\",\n \"fields\": [\n \"message\"\n ],\n \"term_statistics\": true\n },\n {\n \"_id\": \"1\"\n }\n ]\n}" + }, + "MultiTermVectorsRequestExample2": { + "summary": "Simplified syntax", + "description": "Run `POST /my-index-000001/_mtermvectors`. If all requested documents are in the same index and the parameters are the same, you can use a simplified syntax.\n", + "value": "{\n \"ids\": [ \"1\", \"2\" ],\n \"parameters\": {\n \"fields\": [\n \"message\"\n ],\n \"term_statistics\": true\n }\n}" + }, + "MultiTermVectorsRequestExample3": { + "summary": "Artificial documents", + "description": "Run `POST /_mtermvectors` to generate term vectors for artificial documents provided in the body of the request. 
The mapping used is determined by the specified `_index`.\n", + "value": "{\n \"docs\": [\n {\n \"_index\": \"my-index-000001\",\n \"doc\" : {\n \"message\" : \"test test test\"\n }\n },\n {\n \"_index\": \"my-index-000001\",\n \"doc\" : {\n \"message\" : \"Another test ...\"\n }\n }\n ]\n}" + } } } } @@ -64485,6 +65803,18 @@ "required": [ "script" ] + }, + "examples": { + "PutScriptRequestExample1": { + "summary": "Create a search template", + "description": "Run `PUT _scripts/my-search-template` to create a search template.\n", + "value": "{\n \"script\": {\n \"lang\": \"mustache\",\n \"source\": {\n \"query\": {\n \"match\": {\n \"message\": \"{{query_string}}\"\n }\n },\n \"from\": \"{{from}}\",\n \"size\": \"{{size}}\"\n }\n }\n}" + }, + "PutScriptRequestExample2": { + "summary": "Create a stored script", + "description": "Run `PUT _scripts/my-stored-script` to create a stored script.\n", + "value": "{\n \"script\": {\n \"lang\": \"painless\",\n \"source\": \"Math.log(_score * 2) + params['my_modifier']\"\n }\n}" + } } } }, @@ -64539,6 +65869,12 @@ "type": "string" } } + }, + "examples": { + "RenderSearchTemplateRequestExample1": { + "description": "Run `POST _render/template`", + "value": "{\n \"id\": \"my-search-template\",\n \"params\": {\n \"query_string\": \"hello world\",\n \"from\": 20,\n \"size\": 10\n }\n}" + } } } } @@ -64559,6 +65895,23 @@ "$ref": "#/components/schemas/_types:Script" } } + }, + "examples": { + "ExecutePainlessScriptRequestExample1": { + "summary": "Test context", + "description": "Run `POST /_scripts/painless/_execute`. The `painless_test` context is the default context. It runs scripts without additional parameters. The only variable that is available is `params`, which can be used to access user defined values. The result of the script is always converted to a string.\n", + "value": "{\n \"script\": {\n \"source\": \"params.count / params.total\",\n \"params\": {\n \"count\": 100.0,\n \"total\": 1000.0\n }\n }\n}" + }, + "ExecutePainlessScriptRequestExample2": { + "summary": "Filter context", + "description": "Run `POST /_scripts/painless/_execute` with a `filter` context. It treats scripts as if they were run inside a script query. For testing purposes, a document must be provided so that it will be temporarily indexed in-memory and is accessible from the script. More precisely, the `_source`, stored fields, and doc values of such a document are available to the script being tested.\n", + "value": "{\n \"script\": {\n \"source\": \"doc['field'].value.length() <= params.max_length\",\n \"params\": {\n \"max_length\": 4\n }\n },\n \"context\": \"filter\",\n \"context_setup\": {\n \"index\": \"my-index-000001\",\n \"document\": {\n \"field\": \"four\"\n }\n }\n}" + }, + "ExecutePainlessScriptRequestExample3": { + "summary": "Score context", + "description": "Run `POST /_scripts/painless/_execute` with a `score` context. 
It treats scripts as if they were run inside a `script_score` function in a `function_score` query.\n", + "value": "{\n \"script\": {\n \"source\": \"doc['rank'].value / params.max_rank\",\n \"params\": {\n \"max_rank\": 5.0\n }\n },\n \"context\": \"score\",\n \"context_setup\": {\n \"index\": \"my-index-000001\",\n \"document\": {\n \"rank\": 4\n }\n }\n}" + } } } } @@ -64579,6 +65932,12 @@ "required": [ "scroll_id" ] + }, + "examples": { + "ScrollRequestExample1": { + "description": "Run `GET /_search/scroll` to get the next batch of results for a scrolling search.", + "value": "{\n \"scroll_id\" : \"DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==\"\n}" + } } } } @@ -64764,6 +66123,23 @@ } } } + }, + "examples": { + "SearchRequestExample1": { + "summary": "A simple term search", + "description": "Run `GET /my-index-000001/_search?from=40&size=20` to run a search.\n", + "value": "{\n \"query\": {\n \"term\": {\n \"user.id\": \"kimchy\"\n }\n }\n}" + }, + "SearchRequestExample2": { + "summary": "A point in time search", + "description": "Run `POST /_search` to run a point in time search. The `id` parameter tells Elasticsearch to run the request using contexts from this open point in time. The `keep_alive` parameter tells Elasticsearch how long it should extend the time to live of the point in time.\n", + "value": "{\n \"size\": 100, \n \"query\": {\n \"match\" : {\n \"title\" : \"elasticsearch\"\n }\n },\n \"pit\": {\n \"id\": \"46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==\", \n \"keep_alive\": \"1m\" \n }\n}" + }, + "SearchRequestExample3": { + "summary": "Search slicing", + "description": "When paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently. The first `GET /_search` request returns documents belonging to the first slice (`id: 0`). If you run a second request with `id` set to `1`, it returns documents in the second slice. 
Since the maximum number of slices is set to `2`, the union of the results is equivalent to the results of a point-in-time search without slicing.\n", + "value": "{\n \"slice\": {\n \"id\": 0, \n \"max\": 2 \n },\n \"query\": {\n \"match\": {\n \"message\": \"foo\"\n }\n },\n \"pit\": {\n \"id\": \"46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==\"\n }\n}" + } } } } @@ -64782,6 +66158,12 @@ } } } + }, + "examples": { + "SearchApplicationsSearchRequestExample1": { + "description": "Use `POST _application/search_application/my-app/_search` to run a search against a search application called `my-app` that uses a search template.", + "value": "{\n \"params\": {\n \"query_string\": \"my first query\",\n \"text_fields\": [\n {\"name\": \"title\", \"boost\": 5},\n {\"name\": \"description\", \"boost\": 1}\n ]\n }\n}" + } } } } @@ -64845,6 +66227,12 @@ "type": "boolean" } } + }, + "examples": { + "SearchMvtRequestExample1": { + "description": "Run `GET museums/_mvt/location/13/4207/2692` to search an index for `location` values that intersect the `13/4207/2692` vector tile.\n", + "value": "{\n \"grid_agg\": \"geotile\",\n \"grid_precision\": 2,\n \"fields\": [\n \"name\",\n \"price\"\n ],\n \"query\": {\n \"term\": {\n \"included\": true\n }\n },\n \"aggs\": {\n \"min_price\": {\n \"min\": {\n \"field\": \"price\"\n }\n },\n \"max_price\": {\n \"max\": {\n \"field\": \"price\"\n }\n },\n \"avg_price\": {\n \"avg\": {\n \"field\": \"price\"\n }\n }\n }\n}" + } } } } @@ -64878,6 +66266,12 @@ "type": "string" } } + }, + "examples": { + "SearchTemplateRequestExample1": { + "description": "Run `GET my-index/_search/template` to run a search with a search template.\n", + "value": "{\n \"id\": \"my-search-template\",\n \"params\": {\n \"query_string\": \"hello world\",\n \"from\": 0,\n \"size\": 10\n }\n}" + } } } }, @@ -64909,6 +66303,12 @@ "$ref": "#/components/schemas/_types:Metadata" } } + }, + "examples": { + "SecurityCreateApiKeyRequestExample1": { + "description": "Run `POST /_security/api_key` to create an API key. If `expiration` is not provided, the API keys do not expire. 
If `role_descriptors` is not provided, the permissions of the authenticated user are applied.\n", + "value": "{\n \"name\": \"my-api-key\",\n \"expiration\": \"1d\", \n \"role_descriptors\": { \n \"role-a\": {\n \"cluster\": [\"all\"],\n \"indices\": [\n {\n \"names\": [\"index-a*\"],\n \"privileges\": [\"read\"]\n }\n ]\n },\n \"role-b\": {\n \"cluster\": [\"all\"],\n \"indices\": [\n {\n \"names\": [\"index-b*\"],\n \"privileges\": [\"all\"]\n }\n ]\n }\n },\n \"metadata\": {\n \"application\": \"my-application\",\n \"environment\": {\n \"level\": 1,\n \"trusted\": true,\n \"tags\": [\"dev\", \"staging\"]\n }\n }\n}" + } } } }, @@ -64940,6 +66340,12 @@ } } } + }, + "examples": { + "SecurityHasPrivilegesRequestExample1": { + "description": "Run `GET /_security/user/_has_privileges` to check whether the current user has a specific set of cluster, index, and application privileges.", + "value": "{\n \"cluster\": [ \"monitor\", \"manage\" ],\n \"index\" : [\n {\n \"names\": [ \"suppliers\", \"products\" ],\n \"privileges\": [ \"read\" ]\n },\n {\n \"names\": [ \"inventory\" ],\n \"privileges\" : [ \"read\", \"write\" ]\n }\n ],\n \"application\": [\n {\n \"application\": \"inventory_manager\",\n \"privileges\" : [ \"read\", \"data:write/inventory\" ],\n \"resources\" : [ \"product/1852563\" ]\n }\n ]\n}" + } } } }, @@ -64997,6 +66403,23 @@ } } } + }, + "examples": { + "SecurityPutRoleRequestExample1": { + "summary": "Role example 1", + "description": "Run `POST /_security/role/my_admin_role` to create a role.", + "value": "{\n \"description\": \"Grants full access to all management features within the cluster.\",\n \"cluster\": [\"all\"],\n \"indices\": [\n {\n \"names\": [ \"index1\", \"index2\" ],\n \"privileges\": [\"all\"],\n \"field_security\" : { // optional\n \"grant\" : [ \"title\", \"body\" ]\n },\n \"query\": \"{\\\"match\\\": {\\\"title\\\": \\\"foo\\\"}}\" // optional\n }\n ],\n \"applications\": [\n {\n \"application\": \"myapp\",\n \"privileges\": [ \"admin\", \"read\" ],\n \"resources\": [ \"*\" ]\n }\n ],\n \"run_as\": [ \"other_user\" ], // optional\n \"metadata\" : { // optional\n \"version\" : 1\n }\n}" + }, + "SecurityPutRoleRequestExample2": { + "summary": "Role example 2", + "description": "Run `POST /_security/role/cli_or_drivers_minimal` to configure a role that can run SQL in JDBC.", + "value": "{\n \"cluster\": [\"cluster:monitor/main\"],\n \"indices\": [\n {\n \"names\": [\"test\"],\n \"privileges\": [\"read\", \"indices:admin/get\"]\n }\n ]\n}" + }, + "SecurityPutRoleRequestExample3": { + "summary": "Role example 3", + "description": "Run `POST /_security/role/only_remote_access_role` to configure a role with remote indices and remote cluster privileges for a remote cluster.", + "value": "{\n \"remote_indices\": [\n {\n \"clusters\": [\"my_remote\"], \n \"names\": [\"logs*\"], \n \"privileges\": [\"read\", \"read_cross_cluster\", \"view_index_metadata\"] \n }\n ],\n \"remote_cluster\": [\n {\n \"clusters\": [\"my_remote\"], \n \"privileges\": [\"monitor_stats\"] \n }\n ]\n}" + } } } }, @@ -65033,6 +66456,23 @@ "$ref": "#/components/schemas/_types:SortResults" } } + }, + "examples": { + "QueryApiKeysRequestExample1": { + "summary": "Query API keys by ID", + "description": "Run `GET /_security/_query/api_key?with_limited_by=true` to retrieve an API key by ID.", + "value": "{\n \"query\": {\n \"ids\": {\n \"values\": [\n \"VuaCfGcBCdbkQm-e5aOx\"\n ]\n }\n }\n}" + }, + "QueryApiKeysRequestExample2": { + "summary": "Query API keys with pagination", + "description": 
"Run `GET /_security/_query/api_key`. Use a `bool` query to issue complex logical conditions and use `from`, `size`, and `sort` to help paginate the result. For example, the API key name must begin with `app1-key-` and must not be `app1-key-01`. It must be owned by a username with the wildcard pattern `org-*-user` and the `environment` metadata field must have a `production` value. The offset to begin the search result is the twentieth (zero-based index) API key. The page size of the response is 10 API keys. The result is first sorted by creation date in descending order, then by name in ascending order.\n", + "value": "{\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"prefix\": {\n \"name\": \"app1-key-\" \n }\n },\n {\n \"term\": {\n \"invalidated\": \"false\" \n }\n }\n ],\n \"must_not\": [\n {\n \"term\": {\n \"name\": \"app1-key-01\" \n }\n }\n ],\n \"filter\": [\n {\n \"wildcard\": {\n \"username\": \"org-*-user\" \n }\n },\n {\n \"term\": {\n \"metadata.environment\": \"production\" \n }\n }\n ]\n }\n },\n \"from\": 20, \n \"size\": 10, \n \"sort\": [ \n { \"creation\": { \"order\": \"desc\", \"format\": \"date_time\" } },\n \"name\"\n ]\n}" + }, + "QueryApiKeysRequestExample3": { + "summary": "Query API keys by name", + "description": "Run `GET /_security/_query/api_key` to retrieve the API key by name.", + "value": "{\n \"query\": {\n \"term\": {\n \"name\": {\n \"value\": \"application-key-1\"\n }\n }\n }\n}" + } } } } @@ -65061,6 +66501,18 @@ "$ref": "#/components/schemas/_types:SortResults" } } + }, + "examples": { + "QueryRolesRequestExample1": { + "summary": "Query roles by name", + "description": "Run `POST /_security/_query/role` to lists all roles, sorted by the role name.", + "value": "{\n \"sort\": [\"name\"]\n}" + }, + "QueryRolesRequestExample2": { + "summary": "Query roles by description", + "description": "Run `POST /_security/_query/role` to query only the user access role, given its description. 
It returns only the best matching role because `size` is set to `1`.\n", + "value": "{\n \"query\": {\n \"match\": {\n \"description\": {\n \"query\": \"user access\"\n }\n }\n },\n \"size\": 1 \n}" + } } } } @@ -65142,6 +66594,12 @@ "$ref": "#/components/schemas/_types:Duration" } } + }, + "examples": { + "QuerySqlRequestExample1": { + "description": "Run `POST _sql?format=txt` to get results for an SQL search.", + "value": "{\n \"query\": \"SELECT * FROM library ORDER BY page_count DESC LIMIT 5\"\n}" + } } } }, @@ -65171,6 +66629,13 @@ "required": [ "query" ] + }, + "examples": { + "TranslateSqlRequestExample1": { + "summary": "Translate an SQL search", + "description": "Run `POST /_sql/translate` to translate an SQL search into a search API request.", + "value": "{\n \"query\": \"SELECT * FROM library ORDER BY page_count DESC\",\n \"fetch_size\": 10\n}" + } } } }, @@ -65211,6 +66676,12 @@ "required": [ "field" ] + }, + "examples": { + "TermsEnumRequestExample1": { + "description": "Run `POST stackoverflow/_terms_enum`.", + "value": "{\n \"field\" : \"tags\",\n \"string\" : \"kiba\"\n}" + } } } } @@ -65236,6 +66707,33 @@ } } } + }, + "examples": { + "TermVectorsRequestExample1": { + "summary": "Return stored term vectors", + "description": "Run `GET /my-index-000001/_termvectors/1` to return all information and statistics for the `text` field in document 1.\n", + "value": "{\n \"fields\" : [\"text\"],\n \"offsets\" : true,\n \"payloads\" : true,\n \"positions\" : true,\n \"term_statistics\" : true,\n \"field_statistics\" : true\n}" + }, + "TermVectorsRequestExample2": { + "summary": "Per-field analyzer", + "description": "Run `GET /my-index-000001/_termvectors/1` to set per-field analyzers. A different analyzer than the one configured for the field can be provided by using the `per_field_analyzer` parameter.\n", + "value": "{\n \"doc\" : {\n \"fullname\" : \"John Doe\",\n \"text\" : \"test test test\"\n },\n \"fields\": [\"fullname\"],\n \"per_field_analyzer\" : {\n \"fullname\": \"keyword\"\n }\n}" + }, + "TermVectorsRequestExample3": { + "summary": "Terms filtering", + "description": "Run `GET /imdb/_termvectors` to filter the terms returned based on their tf-idf scores. It returns the three most \"interesting\" keywords from the artificial document having the given \"plot\" field value. Notice that the keyword \"Tony\" or any stop words are not part of the response, as their tf-idf must be too low.\n", + "value": "{\n \"doc\": {\n \"plot\": \"When wealthy industrialist Tony Stark is forced to build an armored suit after a life-threatening incident, he ultimately decides to use its technology to fight against evil.\"\n },\n \"term_statistics\": true,\n \"field_statistics\": true,\n \"positions\": false,\n \"offsets\": false,\n \"filter\": {\n \"max_num_terms\": 3,\n \"min_term_freq\": 1,\n \"min_doc_freq\": 1\n }\n}" + }, + "TermVectorsRequestExample4": { + "summary": "Generate term vectors on the fly", + "description": "Run `GET /my-index-000001/_termvectors/1`. Term vectors which are not explicitly stored in the index are automatically computed on the fly. This request returns all information and statistics for the fields in document 1, even though the terms haven't been explicitly stored in the index. 
Note that for the field text, the terms are not regenerated.\n", + "value": "{\n \"fields\" : [\"text\", \"some_field_without_term_vectors\"],\n \"offsets\" : true,\n \"positions\" : true,\n \"term_statistics\" : true,\n \"field_statistics\" : true\n}" + }, + "TermVectorsRequestExample5": { + "summary": "Artificial documents", + "description": "Run `GET /my-index-000001/_termvectors`. Term vectors can be generated for artificial documents, that is for documents not present in the index. If dynamic mapping is turned on (default), the document fields not in the original mapping will be dynamically created.\n", + "value": "{\n \"doc\" : {\n \"fullname\" : \"John Doe\",\n \"text\" : \"test test test\"\n }\n}" + } } } } @@ -65275,6 +66773,12 @@ "$ref": "#/components/schemas/transform._types:Latest" } } + }, + "examples": { + "PreviewTransformRequestExample1": { + "description": "Run `POST _transform/_preview` to preview a transform that uses the pivot method.", + "value": "{\n \"source\": {\n \"index\": \"kibana_sample_data_ecommerce\"\n },\n \"pivot\": {\n \"group_by\": {\n \"customer_id\": {\n \"terms\": {\n \"field\": \"customer_id\",\n \"missing_bucket\": true\n }\n }\n },\n \"aggregations\": {\n \"max_price\": {\n \"max\": {\n \"field\": \"taxful_total_price\"\n }\n }\n }\n }\n}" + } } } } diff --git a/output/schema/schema.json b/output/schema/schema.json index d5bb518ba8..2a7314b20c 100644 --- a/output/schema/schema.json +++ b/output/schema/schema.json @@ -22787,6 +22787,28 @@ } }, "description": "Bulk index or delete documents.\nPerform multiple `index`, `create`, `delete`, and `update` actions in a single request.\nThis reduces overhead and can greatly increase indexing speed.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To use the `create` action, you must have the `create_doc`, `create`, `index`, or `write` index privilege. 
Data streams support only the `create` action.\n* To use the `index` action, you must have the `create`, `index`, or `write` index privilege.\n* To use the `delete` action, you must have the `delete` or `write` index privilege.\n* To use the `update` action, you must have the `index` or `write` index privilege.\n* To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n* To make the result of a bulk operation visible to search using the `refresh` parameter, you must have the `maintenance` or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nThe actions are specified in the request body using a newline delimited JSON (NDJSON) structure:\n\n```\naction_and_meta_data\\n\noptional_source\\n\naction_and_meta_data\\n\noptional_source\\n\n....\naction_and_meta_data\\n\noptional_source\\n\n```\n\nThe `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API.\nA `create` action fails if a document with the same ID already exists in the target.\nAn `index` action adds or replaces a document as necessary.\n\nNOTE: Data streams support only the `create` action.\nTo update or delete a document in a data stream, you must target the backing index containing the document.\n\nAn `update` action expects that the partial doc, upsert, and script and its options are specified on the next line.\n\nA `delete` action does not expect a source on the next line and has the same semantics as the standard delete API.\n\nNOTE: The final line of data must end with a newline character (`\\n`).\nEach newline character may be preceded by a carriage return (`\\r`).\nWhen sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`.\nBecause this format uses literal newline characters (`\\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed.\n\nIf you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument.\n\nA note on the format: the idea here is to make processing as fast as possible.\nAs some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side.\n\nClient libraries using this protocol should strive to do something similar on the client side, and reduce buffering as much as possible.\n\nThere is no \"correct\" number of actions to perform in a single bulk request.\nExperiment with different settings to find the optimal size for your particular workload.\nNote that Elasticsearch limits the maximum size of an HTTP request to 100MB by default, so clients must ensure that no request exceeds this size.\nIt is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch.\nFor instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch.\n\n**Client support for bulk requests**\n\nSome of the officially supported clients provide helpers to assist with bulk requests and reindexing:\n\n* Go: Check out `esutil.BulkIndexer`\n* Perl: Check out 
`Search::Elasticsearch::Client::5_0::Bulk` and `Search::Elasticsearch::Client::5_0::Scroll`\n* Python: Check out `elasticsearch.helpers.*`\n* JavaScript: Check out `client.helpers.*`\n* .NET: Check out `BulkAllObservable`\n* PHP: Check out bulk indexing.\n\n**Submitting bulk requests with cURL**\n\nIf you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`.\nThe latter doesn't preserve newlines. For example:\n\n```\n$ cat requests\n{ \"index\" : { \"_index\" : \"test\", \"_id\" : \"1\" } }\n{ \"field1\" : \"value1\" }\n$ curl -s -H \"Content-Type: application/x-ndjson\" -XPOST localhost:9200/_bulk --data-binary \"@requests\"; echo\n{\"took\":7, \"errors\": false, \"items\":[{\"index\":{\"_index\":\"test\",\"_id\":\"1\",\"_version\":1,\"result\":\"created\",\"forced_refresh\":false}}]}\n```\n\n**Optimistic concurrency control**\n\nEach `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines.\nThe `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. See Optimistic concurrency control for more details.\n\n**Versioning**\n\nEach bulk item can include the version value using the `version` field.\nIt automatically follows the behavior of the index or delete operation based on the `_version` mapping.\nIt also supports the `version_type`.\n\n**Routing**\n\nEach bulk item can include the routing value using the `routing` field.\nIt automatically follows the behavior of the index or delete operation based on the `_routing` mapping.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Wait for active shards**\n\nWhen making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active before starting to process the bulk request.\n\n**Refresh**\n\nControl when the changes made by this request are visible to search.\n\nNOTE: Only the shards that receive the bulk request will be affected by refresh.\nImagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards.\nThe request will only wait for those three shards to refresh.\nThe other two shards that make up the index do not participate in the `_bulk` request at all.", + "examples": { + "BulkRequestExample1": { + "description": "Run `POST _bulk` to perform multiple operations.", + "summary": "Multiple operations", + "value": "{ \"index\" : { \"_index\" : \"test\", \"_id\" : \"1\" } }\n{ \"field1\" : \"value1\" }\n{ \"delete\" : { \"_index\" : \"test\", \"_id\" : \"2\" } }\n{ \"create\" : { \"_index\" : \"test\", \"_id\" : \"3\" } }\n{ \"field1\" : \"value3\" }\n{ \"update\" : {\"_id\" : \"1\", \"_index\" : \"test\"} }\n{ \"doc\" : {\"field2\" : \"value2\"} }" + }, + "BulkRequestExample2": { + "description": "When you run `POST _bulk` and use the `update` action, you can use `retry_on_conflict` as a field in the action itself (not in the extra payload line) to specify how many times an update should be retried in the case of a version conflict.\n", + "summary": "Bulk updates", + "value": "{ \"update\" : {\"_id\" : \"1\", \"_index\" : \"index1\", \"retry_on_conflict\" : 3} 
}\n{ \"script\" : { \"source\": \"ctx._source.counter += params.param1\", \"lang\" : \"painless\", \"params\" : {\"param1\" : 1}}, \"upsert\" : {\"counter\" : 1}}\n{ \"update\" : {\"_id\" : \"2\", \"_index\" : \"index1\", \"retry_on_conflict\" : 3} }\n{ \"doc\" : {\"field\" : \"value\"}, \"doc_as_upsert\" : true }\n{ \"update\" : {\"_id\" : \"3\", \"_index\" : \"index1\", \"_source\" : true} }\n{ \"doc\" : {\"field\" : \"value\"} }\n{ \"update\" : {\"_id\" : \"4\", \"_index\" : \"index1\"} }\n{ \"doc\" : {\"field\" : \"value\"}, \"_source\": true}" + }, + "BulkRequestExample3": { + "description": "To return only information about failed operations, run `POST /_bulk?filter_path=items.*.error`.\n", + "summary": "Filter for failed operations", + "value": "{ \"update\": {\"_id\": \"5\", \"_index\": \"index1\"} }\n{ \"doc\": {\"my_field\": \"foo\"} }\n{ \"update\": {\"_id\": \"6\", \"_index\": \"index1\"} }\n{ \"doc\": {\"my_field\": \"foo\"} }\n{ \"create\": {\"_id\": \"7\", \"_index\": \"index1\"} }\n{ \"my_field\": \"foo\" }" + }, + "BulkRequestExample4": { + "description": "Run `POST /_bulk` to perform a bulk request that consists of index and create actions with the `dynamic_templates` parameter. The bulk request creates two new fields `work_location` and `home_location` with type `geo_point` according to the `dynamic_templates` parameter. However, the `raw_location` field is created using default dynamic mapping rules, as a text field in that case since it is supplied as a string in the JSON document.\n", + "summary": "Dynamic templates", + "value": "{ \"index\" : { \"_index\" : \"my_index\", \"_id\" : \"1\", \"dynamic_templates\": {\"work_location\": \"geo_point\"}} }\n{ \"field\" : \"value1\", \"work_location\": \"41.12,-71.34\", \"raw_location\": \"41.12,-71.34\"}\n{ \"create\" : { \"_index\" : \"my_index\", \"_id\" : \"2\", \"dynamic_templates\": {\"home_location\": \"geo_point\"}} }\n{ \"field\" : \"value2\", \"home_location\": \"41.12,-71.34\"}" + } + }, "generics": [ { "name": "TDocument", @@ -23465,6 +23487,12 @@ ] }, "description": "Clear a scrolling search.\nClear the search context and results for a scrolling search.", + "examples": { + "ClearScrollRequestExample1": { + "description": "Run `DELETE /_search/scroll` to clear the search context and results for a scrolling search.", + "value": "{\n \"scroll_id\": \"DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -23590,6 +23618,12 @@ ] }, "description": "Close a point in time.\nA point in time must be opened explicitly before being used in search requests.\nThe `keep_alive` parameter tells Elasticsearch how long it should persist.\nA point in time is automatically closed when the `keep_alive` period has elapsed.\nHowever, keeping points in time has a cost; close them as soon as they are no longer required for search requests.", + "examples": { + "ClosePointInTimeRequestExample1": { + "description": "Run `DELETE /_pit` to close a point-in-time.", + "value": "{\n \"id\": \"46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -23635,6 +23669,12 @@ } ] }, + "examples": { + "ClosePointInTimeResponseExample1": { + "description": "A successful response from `DELETE /_pit`.", + "value": "{\n \"succeeded\": true, \n \"num_freed\": 3 \n}" + } + }, "exceptions": [ { "body": { @@ 
-23698,6 +23738,12 @@ ] }, "description": "Count search results.\nGet the number of documents matching a query.\n\nThe query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body.\nThe query is optional. When no query is provided, the API uses `match_all` to count all the documents.\n\nThe count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices.\n\nThe operation is broadcast across all shards.\nFor each shard ID group, a replica is chosen and the search is run against it.\nThis means that replicas increase the scalability of the count.", + "examples": { + "CountRequestExample1": { + "description": "Run `GET /my-index-000001/_count?q=user:kimchy`. Alternatively, run `GET /my-index-000001/_count` with the same query in the request body. Both requests count the number of documents in `my-index-000001` with a `user.id` of `kimchy`.\n", + "value": "{\n \"query\" : {\n \"term\" : { \"user.id\" : \"kimchy\" }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -23934,6 +23980,12 @@ } ] }, + "examples": { + "CountResponseExample1": { + "description": "A successful response from `GET /my-index-000001/_count?q=user:kimchy`.", + "value": "{\n \"count\": 1,\n \"_shards\": {\n \"total\": 1,\n \"successful\": 1,\n \"skipped\": 0,\n \"failed\": 0\n }\n}" + } + }, "name": { "name": "Response", "namespace": "_global.count" @@ -23957,6 +24009,12 @@ } }, "description": "Create a new document in the index.\n\nYou can index a new JSON document with the `/<target>/_doc/` or `/<target>/_create/<_id>` APIs.\nUsing `_create` guarantees that the document is indexed only if it does not already exist.\nIt returns a 409 response when a document with the same ID already exists in the index.\nTo update an existing document, you must use the `/<target>/_doc/` API.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add a document using the `PUT /<target>/_create/<_id>` or `POST /<target>/_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. 
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards` request parameter.\n\nValid values are `all` or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C, and you create an index named `index` with the number of replicas set to 3 (resulting in 4 shard copies, one more copy 
request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.", + "examples": { + "CreateRequestExample1": { + "description": "Run `PUT my-index-000001/_create/1` to index a document into the `my-index-000001` index if no document with that ID exists.\n", + "value": "{\n \"@timestamp\": \"2099-11-15T13:12:00\",\n \"message\": \"GET /search HTTP/1.1 200 1070000\",\n \"user\": {\n \"id\": \"kimchy\"\n }\n}" + } + }, "generics": [ { "name": "TDocument", @@ -24368,6 +24426,28 @@ ] }, "description": "Delete documents.\n\nDeletes documents that match the specified query.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:\n\n* `read`\n* `delete` or `write`\n\nYou can specify the query criteria in the request URI or the request body using the same syntax as the search API.\nWhen you submit a delete by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and deletes matching documents using internal versioning.\nIf a document changes between the time that the snapshot is taken and the delete operation is processed, it results in a version conflict and the delete operation fails.\n\nNOTE: Documents with a version equal to 0 cannot be deleted using delete by query because internal versioning does not support 0 as a valid version number.\n\nWhile processing a delete by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents to delete.\nA bulk delete request is performed for each batch of matching documents.\nIf a search or bulk request is rejected, the requests are retried up to 10 times, with exponential back off.\nIf the maximum retry limit is reached, processing halts and all failed requests are returned in the response.\nAny delete requests that completed successfully still stick; they are not rolled back.\n\nYou can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`.\nNote that if you opt to count version conflicts, the operation could attempt to delete more documents from the source than `max_docs` until it has successfully deleted `max_docs` documents, or it has gone through every document in the source query.\n\n**Throttling delete requests**\n\nTo control the rate at which delete by query issues batches of delete operations, you can set `requests_per_second` to any positive decimal number.\nThis pads each batch with a 
wait time to throttle the rate.\nSet `requests_per_second` to `-1` to disable throttling.\n\nThrottling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account.\nThe padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is `1000`, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single `_bulk` request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nDelete by query supports sliced scroll to parallelize the delete process.\nThis can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nSetting `slices` to `auto` lets Elasticsearch choose the number of slices to use.\nThis setting will use one slice per shard, up to a certain limit.\nIf there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.\nAdding slices to the delete by query operation creates sub-requests which means it has some quirks:\n\n* You can see these requests in the tasks APIs. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with slices only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with `slices` will cancel each sub-request.\n* Due to the nature of `slices` each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the earlier point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being deleted.\n* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.\n\nIf you're slicing manually or otherwise tuning automatic slicing, keep in mind that:\n\n* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many `slices` hurts performance. Setting `slices` higher than the number of shards generally does not improve efficiency and adds overhead.\n* Delete performance scales linearly across available resources with the number of slices.\n\nWhether query or delete performance dominates the runtime depends on the documents being reindexed and cluster resources.\n\n**Cancel a delete by query operation**\n\nAny delete by query can be canceled using the task cancel API. 
For example:\n\n```\nPOST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel\n```\n\nThe task ID can be found by using the get tasks API.\n\nCancellation should happen quickly but might take a few seconds.\nThe get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself.", + "examples": { + "DeleteByQueryRequestExample1": { + "description": "Run `POST /my-index-000001,my-index-000002/_delete_by_query` to delete all documents from multiple data streams or indices.", + "summary": "Delete all documents", + "value": "{\n \"query\": {\n \"match_all\": {}\n }\n}" + }, + "DeleteByQueryRequestExample2": { + "description": "Run `POST my-index-000001/_delete_by_query` to delete a document by using a unique attribute.", + "summary": "Delete a single document", + "value": "{\n \"query\": {\n \"term\": {\n \"user.id\": \"kimchy\"\n }\n },\n \"max_docs\": 1\n}" + }, + "DeleteByQueryRequestExample3": { + "description": "Run `POST my-index-000001/_delete_by_query` to slice a delete by query manually. Provide a slice ID and total number of slices.\n", + "summary": "Slice manually", + "value": "{\n \"slice\": {\n \"id\": 0,\n \"max\": 2\n },\n \"query\": {\n \"range\": {\n \"http.response.bytes\": {\n \"lt\": 2000000\n }\n }\n }\n}" + }, + "DeleteByQueryRequestExample4": { + "description": "Run `POST my-index-000001/_delete_by_query?refresh&slices=5` to let delete by query automatically parallelize using sliced scroll to slice on `_id`. The `slices` query parameter value specifies the number of slices to use.\n", + "summary": "Automatic slicing", + "value": "{\n \"query\": {\n \"range\": {\n \"http.response.bytes\": {\n \"lt\": 2000000\n }\n }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -25620,6 +25700,12 @@ ] }, "description": "Explain a document match result.\nGet information about why a specific document matches, or doesn't match, a query.\nIt computes a score explanation for a query and a specific document.", + "examples": { + "ExplainRequestExample1": { + "description": "Run `GET /my-index-000001/_explain/0` with the request body. Alternatively, run `GET /my-index-000001/_explain/0?q=message:elasticsearch`\n", + "value": "{\n \"query\" : {\n \"match\" : { \"message\" : \"elasticsearch\" }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -26148,6 +26234,12 @@ ] }, "description": "Get the field capabilities.\n\nGet information about the capabilities of fields among multiple indices.\n\nFor data streams, the API returns field capabilities among the stream’s backing indices.\nIt returns runtime fields like any other field.\nFor example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family.", + "examples": { + "FieldCapabilitiesRequestExample1": { + "description": "Run `POST my-index-*/_field_caps?fields=rating` to get field capabilities and filter indices with a query. 
Indices that rewrite the provided filter to `match_none` on every shard will be filtered from the response.\n", + "value": "{\n \"index_filter\": {\n \"range\": {\n \"@timestamp\": {\n \"gte\": \"2018\"\n }\n }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -28877,6 +28969,18 @@ } }, "description": "Create or update a document in an index.\n\nAdd a JSON document to the specified data stream or index and make it searchable.\nIf the target is an index and the document already exists, the request updates the document and increments its version.\n\nNOTE: You cannot use this API to send update requests for existing documents in a data stream.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add or overwrite a document using the `PUT /<target>/_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.\n* To add a document using the `POST /<target>/_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nNOTE: Replica shards might not all be started when an indexing operation returns successfully.\nBy default, only the primary is required. Set `wait_for_active_shards` to change this default behavior.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. 
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Optimistic concurrency control**\n\nIndex operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards` request parameter.\n\nValid values are `all` or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C, and you create an index named `index` with the number of replicas set to 3 (resulting in 4 shard copies, one more copy 
than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n\n**No operation (noop) updates**\n\nWhen updating a document by using this API, a new version of the document is always created even if the document hasn't changed.\nIf this isn't acceptable, use the `_update` API with `detect_noop` set to `true`.\nThe `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source.\n\nThere isn't a definitive rule for when noop updates aren't acceptable.\nIt's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.\n\n**Versioning**\n\nEach indexed document is given a version number.\nBy default, internal versioning is used that starts at 1 and increments with each update, deletes included.\nOptionally, the version number can be set to an external value (for example, if maintained in a database).\nTo enable this functionality, `version_type` should be set to `external`.\nThe value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.\n\nNOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations.\nIf no version is provided, the operation runs without any version checks.\n\nWhen using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document.\nIf true, the document will be indexed and the new version number used.\nIf the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. 
For example:\n\n```\nPUT my-index-000001/_doc/1?version=2&version_type=external\n{\n \"user\": {\n \"id\": \"elkbee\"\n }\n}\n```\n\nIn this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.\nIf the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).\n\nA nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.\nEven the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.", + "examples": { + "IndexRequestExample1": { + "description": "Run `POST my-index-000001/_doc/` to index a document. When you use the `POST /<target>/_doc/` request format, the `op_type` is automatically set to `create` and the index operation generates a unique ID for the document.\n", + "summary": "Automate document IDs", + "value": "{\n \"@timestamp\": \"2099-11-15T13:12:00\",\n \"message\": \"GET /search HTTP/1.1 200 1070000\",\n \"user\": {\n \"id\": \"kimchy\"\n }\n}" + }, + "IndexRequestExample2": { + "description": "Run `PUT my-index-000001/_doc/1` to insert a JSON document into the `my-index-000001` index with an `_id` of 1.\n", + "summary": "Define document IDs", + "value": "{\n \"@timestamp\": \"2099-11-15T13:12:00\",\n \"message\": \"GET /search HTTP/1.1 200 1070000\",\n \"user\": {\n \"id\": \"kimchy\"\n }\n}" + } + }, "generics": [ { "name": "TDocument", @@ -29680,6 +29784,28 @@ ] }, "description": "Get multiple documents.\n\nGet multiple JSON documents by ID from one or more indices.\nIf you specify an index in the request URI, you only need to specify the document IDs in the request body.\nTo ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.\n\n**Filter source fields**\n\nBy default, the `_source` field is returned for every document (if stored).\nUse the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document.\nYou can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions.\n\n**Get stored fields**\n\nUse the `stored_fields` attribute to specify the set of stored fields you want to retrieve.\nAny requested fields that are not stored are ignored.\nYou can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions.", + "examples": { + "MultiGetRequestExample1": { + "description": "Run `GET /my-index-000001/_mget`. When you specify an index in the request URI, only the document IDs are required in the request body.\n", + "summary": "Get documents by ID", + "value": "{\n \"docs\": [\n {\n \"_id\": \"1\"\n },\n {\n \"_id\": \"2\"\n }\n ]\n}" + }, + "MultiGetRequestExample2": { + "description": "Run `GET /_mget`. This request sets `_source` to `false` for document 1 to exclude the source entirely. It retrieves `field3` and `field4` from document 2. 
It retrieves the `user` field from document 3 but filters out the `user.location` field.\n", + "summary": "Filter source fields", + "value": "{\n \"docs\": [\n {\n \"_index\": \"test\",\n \"_id\": \"1\",\n \"_source\": false\n },\n {\n \"_index\": \"test\",\n \"_id\": \"2\",\n \"_source\": [ \"field3\", \"field4\" ]\n },\n {\n \"_index\": \"test\",\n \"_id\": \"3\",\n \"_source\": {\n \"include\": [ \"user\" ],\n \"exclude\": [ \"user.location\" ]\n }\n }\n ]\n}" + }, + "MultiGetRequestExample3": { + "description": "Run `GET /_mget`. This request retrieves `field1` and `field2` from document 1 and `field3` and `field4` from document 2.\n", + "summary": "Get stored fields", + "value": "{\n \"docs\": [\n {\n \"_index\": \"test\",\n \"_id\": \"1\",\n \"stored_fields\": [ \"field1\", \"field2\" ]\n },\n {\n \"_index\": \"test\",\n \"_id\": \"2\",\n \"stored_fields\": [ \"field3\", \"field4\" ]\n }\n ]\n}" + }, + "MultiGetRequestExample4": { + "description": "Run `GET /_mget?routing=key1`. If routing is used during indexing, you need to specify the routing value to retrieve documents. This request fetches `test/_doc/2` from the shard corresponding to routing key `key1`. It fetches `test/_doc/1` from the shard corresponding to routing key `key2`.\n", + "summary": "Document routing", + "value": "{\n \"docs\": [\n {\n \"_index\": \"test\",\n \"_id\": \"1\",\n \"routing\": \"key2\"\n },\n {\n \"_index\": \"test\",\n \"_id\": \"2\"\n }\n ]\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -30961,6 +31087,12 @@ } }, "description": "Run multiple templated searches.\n\nRun multiple templated searches with a single request.\nIf you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines.\nFor example:\n\n```\n$ cat requests\n{ \"index\": \"my-index\" }\n{ \"id\": \"my-search-template\", \"params\": { \"query_string\": \"hello world\", \"from\": 0, \"size\": 10 }}\n{ \"index\": \"my-other-index\" }\n{ \"id\": \"my-other-search-template\", \"params\": { \"query_type\": \"match_all\" }}\n\n$ curl -H \"Content-Type: application/x-ndjson\" -XGET localhost:9200/_msearch/template --data-binary \"@requests\"; echo\n```", + "examples": { + "MultiSearchTemplateRequestExample1": { + "description": "Run `GET my-index/_msearch/template` to run multiple templated searches.", + "value": "{ }\n{ \"id\": \"my-search-template\", \"params\": { \"query_string\": \"hello world\", \"from\": 0, \"size\": 10 }}\n{ }\n{ \"id\": \"my-other-search-template\", \"params\": { \"query_type\": \"match_all\" }}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -31403,6 +31535,23 @@ ] }, "description": "Get multiple term vectors.\n\nGet multiple term vectors with a single request.\nYou can specify existing documents by index and ID or provide artificial documents in the body of the request.\nYou can specify the index in the request body or request URI.\nThe response contains a `docs` array with all the fetched termvectors.\nEach element has the structure provided by the termvectors API.\n\n**Artificial documents**\n\nYou can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request.\nThe mapping used is determined by the specified `_index`.", + "examples": { + "MultiTermVectorsRequestExample1": { + "description": "Run `POST /my-index-000001/_mtermvectors`. 
When you specify an index in the request URI, the index does not need to be specified for each document in the request body.\n", + "summary": "Get multiple term vectors", + "value": "{\n \"docs\": [\n {\n \"_id\": \"2\",\n \"fields\": [\n \"message\"\n ],\n \"term_statistics\": true\n },\n {\n \"_id\": \"1\"\n }\n ]\n}" + }, + "MultiTermVectorsRequestExample2": { + "description": "Run `POST /my-index-000001/_mtermvectors`. If all requested documents are in the same index and the parameters are the same, you can use a simplified syntax.\n", + "summary": "Simplified syntax", + "value": "{\n \"ids\": [ \"1\", \"2\" ],\n \"parameters\": {\n \"fields\": [\n \"message\"\n ],\n \"term_statistics\": true\n }\n}" + }, + "MultiTermVectorsRequestExample3": { + "description": "Run `POST /_mtermvectors` to generate term vectors for artificial documents provided in the body of the request. The mapping used is determined by the specified `_index`.\n", + "summary": "Artificial documents", + "value": "{\n \"docs\": [\n {\n \"_index\": \"my-index-000001\",\n \"doc\" : {\n \"message\" : \"test test test\"\n }\n },\n {\n \"_index\": \"my-index-000001\",\n \"doc\" : {\n \"message\" : \"Another test ...\"\n }\n }\n ]\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -31930,6 +32079,18 @@ ] }, "description": "Create or update a script or search template.\nCreates or updates a stored script or search template.", + "examples": { + "PutScriptRequestExample1": { + "description": "Run `PUT _scripts/my-search-template` to create a search template.\n", + "summary": "Create a search template", + "value": "{\n \"script\": {\n \"lang\": \"mustache\",\n \"source\": {\n \"query\": {\n \"match\": {\n \"message\": \"{{query_string}}\"\n }\n },\n \"from\": \"{{from}}\",\n \"size\": \"{{size}}\"\n }\n }\n}" + }, + "PutScriptRequestExample2": { + "description": "Run `PUT _scripts/my-stored-script` to create a stored script.\n", + "summary": "Create a stored script", + "value": "{\n \"script\": {\n \"lang\": \"painless\",\n \"source\": \"Math.log(_score * 2) + params['my_modifier']\"\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -33067,6 +33228,73 @@ ] }, "description": "Reindex documents.\n\nCopy documents from a source to a destination.\nYou can copy all documents to the destination index or reindex a subset of the documents.\nThe source can be any existing index, alias, or data stream.\nThe destination must differ from the source.\nFor example, you cannot reindex a data stream into itself.\n\nIMPORTANT: Reindex requires `_source` to be enabled for all documents in the source.\nThe destination should be configured as wanted before calling the reindex API.\nReindex does not copy the settings from the source or its associated template.\nMappings, shard counts, and replicas, for example, must be configured ahead of time.\n\nIf the Elasticsearch security features are enabled, you must have the following security privileges:\n\n* The `read` index privilege for the source data stream, index, or alias.\n* The `write` index privilege for the destination data stream, index, or index alias.\n* To automatically create a data stream or index with a reindex API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege for the destination data stream, index, or alias.\n* If reindexing from a remote cluster, the `source.remote.user` must have the `monitor` cluster privilege and the `read` index privilege for the source data stream, index, or alias.\n\nIf reindexing from a remote 
cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting.\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nThe `dest` element can be configured like the index API to control optimistic concurrency control.\nOmitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID.\n\nSetting `version_type` to `external` causes Elasticsearch to preserve the `version` from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source.\n\nSetting `op_type` to `create` causes the reindex API to create only missing documents in the destination.\nAll existing documents will cause a version conflict.\n\nIMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`.\nA reindex can only add new documents to a destination data stream.\nIt cannot update existing documents in a destination data stream.\n\nBy default, version conflicts abort the reindex process.\nTo continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`.\nIn this case, the response includes a count of the version conflicts that were encountered.\nNote that the handling of other error types is unaffected by the `conflicts` property.\nAdditionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query.\n\nNOTE: The reindex API makes no effort to handle ID collisions.\nThe last document written will \"win\" but the order isn't usually predictable so it is not a good idea to rely on this behavior.\nInstead, make sure that IDs are unique by using a script.\n\n**Running reindex asynchronously**\n\nIf the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task.\nElasticsearch creates a record of this task as a document at `_tasks/`.\n\n**Reindex from multiple sources**\n\nIf you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources.\nThat way you can resume the process if there are any errors by removing the partially completed source and starting over.\nIt also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel.\n\nFor example, you can use a bash script like this:\n\n```\nfor index in i1 i2 i3 i4 i5; do\n curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{\n \"source\": {\n \"index\": \"'$index'\"\n },\n \"dest\": {\n \"index\": \"'$index'-reindexed\"\n }\n }'\ndone\n```\n\n**Throttling**\n\nSet `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, for example) to throttle the rate at which reindex issues batches of index operations.\nRequests are throttled by padding each batch with a wait time.\nTo turn off throttling, set `requests_per_second` to `-1`.\n\nThe throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding.\nThe padding time is the difference 
between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is `1000`, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nReindex supports sliced scroll to parallelize the reindexing process.\nThis parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nNOTE: Reindexing from remote clusters does not support manual or automatic slicing.\n\nYou can slice a reindex request manually by providing a slice ID and total number of slices to each request.\nYou can also let reindex automatically parallelize by using sliced scroll to slice on `_id`.\nThe `slices` parameter specifies the number of slices to use.\n\nAdding `slices` to the reindex request just automates the manual process, creating sub-requests which means it has some quirks:\n\n* You can see these requests in the tasks API. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with `slices` only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with `slices` will cancel each sub-request.\n* Due to the nature of `slices`, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. 
Combine that with the previous point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being reindexed.\n* Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time.\n\nIf slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices.\nIf slicing manually or otherwise tuning automatic slicing, use the following guidelines.\n\nQuery performance is most efficient when the number of slices is equal to the number of shards in the index.\nIf that number is large (for example, `500`), choose a lower number as too many slices will hurt performance.\nSetting slices higher than the number of shards generally does not improve efficiency and adds overhead.\n\nIndexing performance scales linearly across available resources with the number of slices.\n\nWhether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources.\n\n**Modify documents during reindexing**\n\nLike `_update_by_query`, reindex operations support a script that modifies the document.\nUnlike `_update_by_query`, the script is allowed to modify the document's metadata.\n\nJust as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination.\nFor example, set `ctx.op` to `noop` if your script decides that the document doesn’t have to be indexed in the destination. This \"no operation\" will be reported in the `noop` counter in the response body.\nSet `ctx.op` to `delete` if your script decides that the document must be deleted from the destination.\nThe deletion will be reported in the `deleted` counter in the response body.\nSetting `ctx.op` to anything else will return an error, as will setting any other field in `ctx`.\n\nThink of the possibilities! 
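The `ctx.op` contract described above amounts to a three-way dispatch. A sketch of that rule (hypothetical function and counter names; this is not Elasticsearch source):

```python
def apply_ctx_op(op: str, counters: dict[str, int]) -> None:
    # Hypothetical dispatcher: reindex honors only `index` (the default),
    # `noop`, and `delete` as values of ctx.op.
    if op == "index":
        counters["indexed"] += 1   # document written to the destination
    elif op == "noop":
        counters["noop"] += 1      # skipped; reported in the `noop` counter
    elif op == "delete":
        counters["deleted"] += 1   # removed; reported in the `deleted` counter
    else:
        raise ValueError(f"setting ctx.op to {op!r} is an error")
```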
Just be careful; you are able to change:\n\n* `_id`\n* `_index`\n* `_version`\n* `_routing`\n\nSetting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request.\nIt will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API.\n\n**Reindex from remote**\n\nReindex supports reindexing from a remote Elasticsearch cluster.\nThe `host` parameter must contain a scheme, host, port, and optional path.\nThe `username` and `password` parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication.\nBe sure to use HTTPS when using basic authentication or the password will be sent in plain text.\nThere are a range of settings available to configure the behavior of the HTTPS connection.\n\nWhen using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key.\nRemote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting.\nIt can be set to a comma delimited list of allowed remote host and port combinations.\nScheme is ignored; only the host and port are used.\nFor example:\n\n```\nreindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*]\n```\n\nThe list of allowed hosts must be configured on any nodes that will coordinate the reindex.\nThis feature should work with remote clusters of any version of Elasticsearch.\nThis should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version.\n\nWARNING: Elasticsearch does not support forward compatibility across major versions.\nFor example, you cannot reindex from a 7.x cluster into a 6.x cluster.\n\nTo enable queries sent to older versions of Elasticsearch, the `query` parameter is sent directly to the remote host without validation or modification.\n\nNOTE: Reindexing from remote clusters does not support manual or automatic slicing.\n\nReindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb.\nIf the remote index includes very large documents you'll need to use a smaller batch size.\nIt is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field.\nBoth default to 30 seconds.\n\n**Configuring SSL parameters**\n\nReindex from remote supports configurable SSL settings.\nThese must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore.\nIt is not possible to configure SSL in the body of the reindex request.", + "examples": { + "ReindexRequestExample1": { + "description": "Run `POST _reindex` to reindex from multiple sources. The `index` attribute in source can be a list, which enables you to copy from lots of sources in one request. This example copies documents from the `my-index-000001` and `my-index-000002` indices.\n", + "summary": "Reindex multiple sources", + "value": "{\n \"source\": {\n \"index\": [\"my-index-000001\", \"my-index-000002\"]\n },\n \"dest\": {\n \"index\": \"my-new-index-000002\"\n }\n}" + }, + "ReindexRequestExample10": { + "description": "You can use Painless to reindex daily indices to apply a new template to the existing documents. 
The script extracts the date from the index name and creates a new index with `-1` appended. For example, all data from `metricbeat-2016.05.31` will be reindexed into `metricbeat-2016.05.31-1`.\n", + "summary": "Reindex with Painless", + "value": "{\n \"source\": {\n \"index\": \"metricbeat-*\"\n },\n \"dest\": {\n \"index\": \"metricbeat\"\n },\n \"script\": {\n \"lang\": \"painless\",\n \"source\": \"ctx._index = 'metricbeat-' + (ctx._index.substring('metricbeat-'.length(), ctx._index.length())) + '-1'\"\n }\n}" + }, + "ReindexRequestExample11": { + "description": "Run `POST _reindex` to extract a random subset of the source for testing. You might need to adjust the `min_score` value depending on the relative amount of data extracted from source.\n", + "summary": "Reindex a random subset", + "value": "{\n \"max_docs\": 10,\n \"source\": {\n \"index\": \"my-index-000001\",\n \"query\": {\n \"function_score\" : {\n \"random_score\" : {},\n \"min_score\" : 0.9\n }\n }\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample12": { + "description": "Run `POST _reindex` to modify documents during reindexing. This example bumps the version of the source document.\n", + "summary": "Reindex modified documents", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\"\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\",\n \"version_type\": \"external\"\n },\n \"script\": {\n \"source\": \"if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}\",\n \"lang\": \"painless\"\n }\n}" + }, + "ReindexRequestExample13": { + "description": "When using Elastic Cloud, you can run `POST _reindex` and authenticate against a remote cluster with an API key.\n", + "summary": "Reindex from remote on Elastic Cloud", + "value": "{\n \"source\": {\n \"remote\": {\n \"host\": \"http://otherhost:9200\",\n \"username\": \"user\",\n \"password\": \"pass\"\n },\n \"index\": \"my-index-000001\",\n \"query\": {\n \"match\": {\n \"test\": \"data\"\n }\n }\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample2": { + "description": "Run `POST _reindex` to slice a reindex request manually. Provide a slice ID and total number of slices to each request.\n", + "summary": "Manual slicing", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\",\n \"slice\": {\n \"id\": 0,\n \"max\": 2\n }\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample3": { + "description": "Run `POST _reindex?slices=5&refresh` to automatically parallelize using sliced scroll to slice on `_id`. The `slices` parameter specifies the number of slices to use.\n", + "summary": "Automatic slicing", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\"\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample4": { + "description": "By default if reindex sees a document with routing then the routing is preserved unless it's changed by the script. You can set `routing` on the `dest` request to change this behavior. 
In this example, run `POST _reindex` to copy all documents from the `source` with the company name `cat` into the `dest` with routing set to `cat`.\n", + "summary": "Routing", + "value": "{\n \"source\": {\n \"index\": \"source\",\n \"query\": {\n \"match\": {\n \"company\": \"cat\"\n }\n }\n },\n \"dest\": {\n \"index\": \"dest\",\n \"routing\": \"=cat\"\n }\n}" + }, + "ReindexRequestExample5": { + "description": "Run `POST _reindex` and use the ingest pipelines feature.", + "summary": "Ingest pipelines", + "value": "{\n \"source\": {\n \"index\": \"source\"\n },\n \"dest\": {\n \"index\": \"dest\",\n \"pipeline\": \"some_ingest_pipeline\"\n }\n}" + }, + "ReindexRequestExample6": { + "description": "Run `POST _reindex` and add a query to the `source` to limit the documents to reindex. For example, this request copies documents into `my-new-index-000001` only if they have a `user.id` of `kimchy`.\n", + "summary": "Reindex with a query", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\",\n \"query\": {\n \"term\": {\n \"user.id\": \"kimchy\"\n }\n }\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample7": { + "description": "You can limit the number of processed documents by setting `max_docs`. For example, run `POST _reindex` to copy a single document from `my-index-000001` to `my-new-index-000001`.\n", + "summary": "Reindex with max_docs", + "value": "{\n \"max_docs\": 1,\n \"source\": {\n \"index\": \"my-index-000001\"\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample8": { + "description": "You can use source filtering to reindex a subset of the fields in the original documents. For example, run `POST _reindex` to reindex only the `user.id` and `_doc` fields of each document.\n", + "summary": "Reindex selected fields", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\",\n \"_source\": [\"user.id\", \"_doc\"]\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample9": { + "description": "A reindex operation can build a copy of an index with renamed fields. 
If your index has documents with `text` and `flag` fields, you can change the latter field name to `tag` during the reindex.\n", + "summary": "Reindex new field names", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\"\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n },\n \"script\": {\n \"source\": \"ctx._source.tag = ctx._source.remove(\\\"flag\\\")\"\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -34039,6 +34267,12 @@ ] }, "description": "Render a search template.\n\nRender a search template as a search request body.", + "examples": { + "RenderSearchTemplateRequestExample1": { + "description": "Run `POST _render/template`.", + "value": "{\n \"id\": \"my-search-template\",\n \"params\": {\n \"query_string\": \"hello world\",\n \"from\": 20,\n \"size\": 10\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -34241,6 +34475,23 @@ ] }, "description": "Run a script.\n\nRuns a script and returns a result.\nUse this API to build and test scripts, such as when defining a script for a runtime field.\nThis API requires very few dependencies and is especially useful if you don't have permissions to write documents on a cluster.\n\nThe API uses several _contexts_, which control how scripts are run, what variables are available at runtime, and what the return type is.\n\nEach context requires a script, but additional parameters depend on the context you're using for that script.", + "examples": { + "ExecutePainlessScriptRequestExample1": { + "description": "Run `POST /_scripts/painless/_execute`. The `painless_test` context is the default context. It runs scripts without additional parameters. The only variable that is available is `params`, which can be used to access user defined values. The result of the script is always converted to a string.\n", + "summary": "Test context", + "value": "{\n \"script\": {\n \"source\": \"params.count / params.total\",\n \"params\": {\n \"count\": 100.0,\n \"total\": 1000.0\n }\n }\n}" + }, + "ExecutePainlessScriptRequestExample2": { + "description": "Run `POST /_scripts/painless/_execute` with a `filter` context. It treats scripts as if they were run inside a script query. For testing purposes, a document must be provided so that it will be temporarily indexed in-memory and is accessible from the script. More precisely, the `_source`, stored fields, and doc values of such a document are available to the script being tested.\n", + "summary": "Filter context", + "value": "{\n \"script\": {\n \"source\": \"doc['field'].value.length() <= params.max_length\",\n \"params\": {\n \"max_length\": 4\n }\n },\n \"context\": \"filter\",\n \"context_setup\": {\n \"index\": \"my-index-000001\",\n \"document\": {\n \"field\": \"four\"\n }\n }\n}" + }, + "ExecutePainlessScriptRequestExample3": { + "description": "Run `POST /_scripts/painless/_execute` with a `score` context. It treats scripts as if they were run inside a `script_score` function in a `function_score` query.\n", + "summary": "Score context", + "value": "{\n \"script\": {\n \"source\": \"doc['rank'].value / params.max_rank\",\n \"params\": {\n \"max_rank\": 5.0\n }\n },\n \"context\": \"score\",\n \"context_setup\": {\n \"index\": \"my-index-000001\",\n \"document\": {\n \"rank\": 4\n }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -34323,6 +34574,12 @@ ] }, "description": "Run a scrolling search.\n\nIMPORTANT: The scroll API is no longer recommended for deep pagination. 
If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT).\n\nThe scroll API gets large sets of results from a single scrolling search request.\nTo get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter.\nThe `scroll` parameter indicates how long Elasticsearch should retain the search context for the request.\nThe search response returns a scroll ID in the `_scroll_id` response body parameter.\nYou can then use the scroll ID with the scroll API to retrieve the next batch of results for the request.\nIf the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search.\n\nYou can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context.\n\nIMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.", + "examples": { + "ScrollRequestExample1": { + "description": "Run `GET /_search/scroll` to get the next batch of results for a scrolling search.", + "value": "{\n \"scroll_id\" : \"DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -34982,6 +35239,23 @@ ] }, "description": "Run a search.\n\nGet search hits that match the query defined in the request.\nYou can provide search queries using the `q` query string parameter or the request body.\nIf both are specified, only the query parameter is used.\n\nIf the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. For cross-cluster search, refer to the documentation about configuring CCS privileges.\nTo search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices.\n\n**Search slicing**\n\nWhen paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties.\nBy default the splitting is done first on the shards, then locally on each shard.\nThe local splitting partitions the shard into contiguous ranges based on Lucene document IDs.\n\nFor instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard.\n\nIMPORTANT: The same point-in-time ID should be used for all slices.\nIf different PIT IDs are used, slices can overlap and miss documents.\nThis situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.", + "examples": { + "SearchRequestExample1": { + "description": "Run `GET /my-index-000001/_search?from=40&size=20` to run a search.\n", + "summary": "A simple term search", + "value": "{\n \"query\": {\n \"term\": {\n \"user.id\": \"kimchy\"\n }\n }\n}" + }, + "SearchRequestExample2": { + "description": "Run `POST /_search` to run a point in time search. The `id` parameter tells Elasticsearch to run the request using contexts from this open point in time. 
The `keep_alive` parameter tells Elasticsearch how long it should extend the time to live of the point in time.\n", + "summary": "A point in time search", + "value": "{\n \"size\": 100, \n \"query\": {\n \"match\" : {\n \"title\" : \"elasticsearch\"\n }\n },\n \"pit\": {\n \"id\": \"46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==\", \n \"keep_alive\": \"1m\" \n }\n}" + }, + "SearchRequestExample3": { + "description": "When paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently. The result from running the first `GET /_search` request returns documents belonging to the first slice (`id: 0`). If you run a second request with `id` set to `1`, it returns documents in the second slice. Since the maximum number of slices is set to `2`, the union of the results is equivalent to the results of a point-in-time search without slicing.\n", + "summary": "Search slicing", + "value": "{\n \"slice\": {\n \"id\": 0, \n \"max\": 2 \n },\n \"query\": {\n \"match\": {\n \"message\": \"foo\"\n }\n },\n \"pit\": {\n \"id\": \"46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==\"\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -35627,6 +35901,12 @@ } } }, + "examples": { + "SearchResponseExample1": { + "description": "An abbreviated response from `GET /my-index-000001/_search?from=40&size=20` with a simple term query.\n", + "value": "{\n \"took\": 5,\n \"timed_out\": false,\n \"_shards\": {\n \"total\": 1,\n \"successful\": 1,\n \"skipped\": 0,\n \"failed\": 0\n },\n \"hits\": {\n \"total\": {\n \"value\": 20,\n \"relation\": \"eq\"\n },\n \"max_score\": 1.3862942,\n \"hits\": [\n {\n \"_index\": \"my-index-000001\",\n \"_id\": \"0\",\n \"_score\": 1.3862942,\n \"_source\": {\n \"@timestamp\": \"2099-11-15T14:12:12\",\n \"http\": {\n \"request\": {\n \"method\": \"get\"\n },\n \"response\": {\n \"status_code\": 200,\n \"bytes\": 1070000\n },\n \"version\": \"1.1\"\n },\n \"source\": {\n \"ip\": \"127.0.0.1\"\n },\n \"message\": \"GET /search HTTP/1.1 200 1070000\",\n \"user\": {\n \"id\": \"kimchy\"\n }\n }\n }\n ]\n }\n}" + } + }, "generics": [ { "name": "TDocument", @@ -41668,6 +41948,12 @@ ] }, "description": "Search a vector tile.\n\nSearch a vector tile for geospatial values.\nBefore using this API, you should be familiar with the Mapbox vector tile specification.\nThe API returns results as a binary Mapbox vector tile.\n\nInternally, Elasticsearch translates a vector tile search API request into a search containing:\n\n* A `geo_bounding_box` query on the ``. The query uses the `//` tile as a bounding box.\n* A `geotile_grid` or `geohex_grid` aggregation on the ``. The `grid_agg` parameter determines the aggregation type. The aggregation uses the `//` tile as a bounding box.\n* Optionally, a `geo_bounds` aggregation on the ``. The search only includes this aggregation if the `exact_bounds` parameter is `true`.\n* If the optional parameter `with_labels` is `true`, the internal search will include a dynamic runtime field that calls the `getLabelPosition` function of the geometry doc value. 
This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label.\n\nFor example, Elasticsearch may translate a vector tile search API request with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of `true` into the following search\n\n```\nGET my-index/_search\n{\n \"size\": 10000,\n \"query\": {\n \"geo_bounding_box\": {\n \"my-geo-field\": {\n \"top_left\": {\n \"lat\": -40.979898069620134,\n \"lon\": -45\n },\n \"bottom_right\": {\n \"lat\": -66.51326044311186,\n \"lon\": 0\n }\n }\n }\n },\n \"aggregations\": {\n \"grid\": {\n \"geotile_grid\": {\n \"field\": \"my-geo-field\",\n \"precision\": 11,\n \"size\": 65536,\n \"bounds\": {\n \"top_left\": {\n \"lat\": -40.979898069620134,\n \"lon\": -45\n },\n \"bottom_right\": {\n \"lat\": -66.51326044311186,\n \"lon\": 0\n }\n }\n }\n },\n \"bounds\": {\n \"geo_bounds\": {\n \"field\": \"my-geo-field\",\n \"wrap_longitude\": false\n }\n }\n }\n}\n```\n\nThe API returns results as a binary Mapbox vector tile.\nMapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers:\n\n* A `hits` layer containing a feature for each `` value matching the `geo_bounding_box` query.\n* An `aggs` layer containing a feature for each cell of the `geotile_grid` or `geohex_grid`. The layer only contains features for cells with matching data.\n* A meta layer containing:\n * A feature containing a bounding box. By default, this is the bounding box of the tile.\n * Value ranges for any sub-aggregations on the `geotile_grid` or `geohex_grid`.\n * Metadata for the search.\n\nThe API only returns features that can display at its zoom level.\nFor example, if a polygon feature has no area at its zoom level, the API omits it.\nThe API returns errors as UTF-8 encoded JSON.\n\nIMPORTANT: You can specify several options for this API as either a query parameter or request body parameter.\nIf you specify both parameters, the query parameter takes precedence.\n\n**Grid precision for geotile**\n\nFor a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles for lower zoom levels.\n`grid_precision` represents the additional zoom levels available through these cells. 
The final precision is computed as follows: ` + grid_precision`.\nFor example, if `` is 7 and `grid_precision` is 8, then the `geotile_grid` aggregation will use a precision of 15.\nThe maximum final precision is 29.\nThe `grid_precision` also determines the number of cells for the grid as follows: `(2^grid_precision) x (2^grid_precision)`.\nFor example, a value of 8 divides the tile into a grid of 256 x 256 cells.\nThe `aggs` layer only contains features for cells with matching data.\n\n**Grid precision for geohex**\n\nFor a `grid_agg` of `geohex`, Elasticsearch uses `` and `grid_precision` to calculate a final precision as follows: ` + grid_precision`.\n\nThis precision determines the H3 resolution of the hexagonal cells produced by the `geohex` aggregation.\nThe following table maps the H3 resolution for each precision.\nFor example, if `` is 3 and `grid_precision` is 3, the precision is 6.\nAt a precision of 6, hexagonal cells have an H3 resolution of 2.\nIf `` is 3 and `grid_precision` is 4, the precision is 7.\nAt a precision of 7, hexagonal cells have an H3 resolution of 3.\n\n| Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio |\n| --------- | ---------------- | ------------- | ---------------- | ----- |\n| 1 | 4 | 0 | 122 | 30.5 |\n| 2 | 16 | 0 | 122 | 7.625 |\n| 3 | 64 | 1 | 842 | 13.15625 |\n| 4 | 256 | 1 | 842 | 3.2890625 |\n| 5 | 1024 | 2 | 5882 | 5.744140625 |\n| 6 | 4096 | 2 | 5882 | 1.436035156 |\n| 7 | 16384 | 3 | 41162 | 2.512329102 |\n| 8 | 65536 | 3 | 41162 | 0.6280822754 |\n| 9 | 262144 | 4 | 288122 | 1.099098206 |\n| 10 | 1048576 | 4 | 288122 | 0.2747745514 |\n| 11 | 4194304 | 5 | 2016842 | 0.4808526039 |\n| 12 | 16777216 | 6 | 14117882 | 0.8414913416 |\n| 13 | 67108864 | 6 | 14117882 | 0.2103728354 |\n| 14 | 268435456 | 7 | 98825162 | 0.3681524172 |\n| 15 | 1073741824 | 8 | 691776122 | 0.644266719 |\n| 16 | 4294967296 | 8 | 691776122 | 0.1610666797 |\n| 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 |\n| 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 |\n| 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 |\n| 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 |\n| 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 |\n| 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 |\n| 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 |\n| 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 |\n| 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 |\n| 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 |\n| 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 |\n| 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 |\n| 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 |\n\nHexagonal cells don't align perfectly on a vector tile.\nSome cells may intersect more than one vector tile.\nTo compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level.\nElasticsearch uses the H3 resolution that is closest to the corresponding geotile density.", + "examples": { + "SearchMvtRequestExample1": { + "description": "Run `GET museums/_mvt/location/13/4207/2692` to search an index for `location` values that intersect the `13/4207/2692` vector tile.\n", + "value": "{\n \"grid_agg\": \"geotile\",\n \"grid_precision\": 2,\n \"fields\": [\n \"name\",\n \"price\"\n ],\n \"query\": {\n \"term\": {\n \"included\": true\n }\n },\n \"aggs\": {\n \"min_price\": {\n \"min\": {\n 
\"field\": \"price\"\n }\n },\n \"max_price\": {\n \"max\": {\n \"field\": \"price\"\n }\n },\n \"avg_price\": {\n \"avg\": {\n \"field\": \"price\"\n }\n }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -42367,6 +42653,12 @@ ] }, "description": "Run a search with a search template.", + "examples": { + "SearchTemplateRequestExample1": { + "description": "Run `GET my-index/_search/template` to run a search with a search template.\n", + "value": "{\n \"id\": \"my-search-template\",\n \"params\": {\n \"query_string\": \"hello world\",\n \"from\": 0,\n \"size\": 10\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -42892,6 +43184,12 @@ ] }, "description": "Get terms in an index.\n\nDiscover terms that match a partial string in an index.\nThis API is designed for low-latency look-ups used in auto-complete scenarios.\n\n> info\n> The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents.", + "examples": { + "TermsEnumRequestExample1": { + "description": "Run `POST stackoverflow/_terms_enum`.", + "value": "{\n \"field\" : \"tags\",\n \"string\" : \"kiba\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -43171,6 +43469,33 @@ ] }, "description": "Get term vector information.\n\nGet information and statistics about terms in the fields of a particular document.\n\nYou can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request.\nYou can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body.\nFor example:\n\n```\nGET /my-index-000001/_termvectors/1?fields=message\n```\n\nFields can be specified using wildcards, similar to the multi match query.\n\nTerm vectors are real-time by default, not near real-time.\nThis can be changed by setting `realtime` parameter to `false`.\n\nYou can request three types of values: _term information_, _term statistics_, and _field statistics_.\nBy default, all term information and field statistics are returned for all fields but term statistics are excluded.\n\n**Term information**\n\n* term frequency in the field (always returned)\n* term positions (`positions: true`)\n* start and end offsets (`offsets: true`)\n* term payloads (`payloads: true`), as base64 encoded bytes\n\nIf the requested information wasn't stored in the index, it will be computed on the fly if possible.\nAdditionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user.\n\n> warn\n> Start and end offsets assume UTF-16 encoding is being used. 
If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16.\n\n**Behaviour**\n\nThe term and field statistics are not accurate.\nDeleted documents are not taken into account.\nThe information is only retrieved for the shard the requested document resides in.\nThe term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context.\nBy default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected.\nUse `routing` only to hit a particular shard.", + "examples": { + "TermVectorsRequestExample1": { + "description": "Run `GET /my-index-000001/_termvectors/1` to return all information and statistics for field `text` in document 1.\n", + "summary": "Return stored term vectors", + "value": "{\n \"fields\" : [\"text\"],\n \"offsets\" : true,\n \"payloads\" : true,\n \"positions\" : true,\n \"term_statistics\" : true,\n \"field_statistics\" : true\n}" + }, + "TermVectorsRequestExample2": { + "description": "Run `GET /my-index-000001/_termvectors/1` to set per-field analyzers. A different analyzer than the one at the field may be provided by using the `per_field_analyzer` parameter.\n", + "summary": "Per-field analyzer", + "value": "{\n \"doc\" : {\n \"fullname\" : \"John Doe\",\n \"text\" : \"test test test\"\n },\n \"fields\": [\"fullname\"],\n \"per_field_analyzer\" : {\n \"fullname\": \"keyword\"\n }\n}" + }, + "TermVectorsRequestExample3": { + "description": "Run `GET /imdb/_termvectors` to filter the terms returned based on their tf-idf scores. It returns the three most \"interesting\" keywords from the artificial document having the given \"plot\" field value. Notice that the keyword \"Tony\" or any stop words are not part of the response, as their tf-idf must be too low.\n", + "summary": "Terms filtering", + "value": "{\n \"doc\": {\n \"plot\": \"When wealthy industrialist Tony Stark is forced to build an armored suit after a life-threatening incident, he ultimately decides to use its technology to fight against evil.\"\n },\n \"term_statistics\": true,\n \"field_statistics\": true,\n \"positions\": false,\n \"offsets\": false,\n \"filter\": {\n \"max_num_terms\": 3,\n \"min_term_freq\": 1,\n \"min_doc_freq\": 1\n }\n}" + }, + "TermVectorsRequestExample4": { + "description": "Run `GET /my-index-000001/_termvectors/1`. Term vectors which are not explicitly stored in the index are automatically computed on the fly. This request returns all information and statistics for the fields in document 1, even though the terms haven't been explicitly stored in the index. Note that for the field text, the terms are not regenerated.\n", + "summary": "Generate term vectors on the fly", + "value": "{\n \"fields\" : [\"text\", \"some_field_without_term_vectors\"],\n \"offsets\" : true,\n \"positions\" : true,\n \"term_statistics\" : true,\n \"field_statistics\" : true\n}" + }, + "TermVectorsRequestExample5": { + "description": "Run `GET /my-index-000001/_termvectors`. Term vectors can be generated for artificial documents, that is for documents not present in the index. 
If dynamic mapping is turned on (default), the document fields not in the original mapping will be dynamically created.\n", + "summary": "Artificial documents", + "value": "{\n \"doc\" : {\n \"fullname\" : \"John Doe\",\n \"text\" : \"test test test\"\n }\n}" + } + }, "generics": [ { "name": "TDocument", @@ -43711,6 +44036,63 @@ ] }, "description": "Update a document.\n\nUpdate a document by running a script or passing a partial document.\n\nIf the Elasticsearch security features are enabled, you must have the `index` or `write` index privilege for the target index or index alias.\n\nThe script can update, delete, or skip modifying the document.\nThe API also supports passing a partial document, which is merged into the existing document.\nTo fully replace an existing document, use the index API.\nThis operation:\n\n* Gets the document (collocated with the shard) from the index.\n* Runs the specified script.\n* Indexes the result.\n\nThe document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation.\n\nThe `_source` field must be enabled to use this API.\nIn addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp).", + "examples": { + "UpdateRequestExample1": { + "description": "Run `POST test/_update/1` to increment a counter by using a script.", + "summary": "Update a counter with a script", + "value": "{\n \"script\" : {\n \"source\": \"ctx._source.counter += params.count\",\n \"lang\": \"painless\",\n \"params\" : {\n \"count\" : 4\n }\n }\n}" + }, + "UpdateRequestExample10": { + "description": "Run `POST test/_update/1` to perform a scripted upsert. When `scripted_upsert` is `true`, the script runs whether or not the document exists.\n", + "summary": "Scripted upsert", + "value": "{\n \"scripted_upsert\": true,\n \"script\": {\n \"source\": \"\"\"\n if ( ctx.op == 'create' ) {\n ctx._source.counter = params.count\n } else {\n ctx._source.counter += params.count\n }\n \"\"\",\n \"params\": {\n \"count\": 4\n }\n },\n \"upsert\": {}\n}" + }, + "UpdateRequestExample11": { + "description": "Run `POST test/_update/1` to perform a doc as upsert. Instead of sending a partial `doc` plus an `upsert` doc, you can set `doc_as_upsert` to `true` to use the contents of `doc` as the `upsert` value.\n", + "summary": "Doc as upsert", + "value": "{\n \"doc\": {\n \"name\": \"new_name\"\n },\n \"doc_as_upsert\": true\n}" + }, + "UpdateRequestExample2": { + "description": "Run `POST test/_update/1` to use a script to add a tag to a list of tags. In this example, it is just a list, so the tag is added even if it exists.\n", + "summary": "Add a tag with a script", + "value": "{\n \"script\": {\n \"source\": \"ctx._source.tags.add(params.tag)\",\n \"lang\": \"painless\",\n \"params\": {\n \"tag\": \"blue\"\n }\n }\n}" + }, + "UpdateRequestExample3": { + "description": "Run `POST test/_update/1` to use a script to remove a tag from a list of tags. The Painless function to remove a tag takes the array index of the element you want to remove. To avoid a possible runtime error, you first need to make sure the tag exists. 
If the list contains duplicates of the tag, this script just removes one occurrence.\n", + "summary": "Remove a tag with a script", + "value": "{\n \"script\": {\n \"source\": \"if (ctx._source.tags.contains(params.tag)) { ctx._source.tags.remove(ctx._source.tags.indexOf(params.tag)) }\",\n \"lang\": \"painless\",\n \"params\": {\n \"tag\": \"blue\"\n }\n }\n}" + }, + "UpdateRequestExample4": { + "description": "Run `POST test/_update/1` to use a script to add a field `new_field` to the document.\n", + "summary": "Add fields with a script", + "value": "{\n \"script\" : \"ctx._source.new_field = 'value_of_new_field'\"\n}" + }, + "UpdateRequestExample5": { + "description": "Run `POST test/_update/1` to use a script to remove a field `new_field` from the document.\n", + "summary": "Remove fields with a script", + "value": "{\n \"script\" : \"ctx._source.remove('new_field')\"\n}" + }, + "UpdateRequestExample6": { + "description": "Run `POST test/_update/1` to use a script to remove a subfield from an object field.\n", + "summary": "Remove subfields with a script", + "value": "{\n \"script\": \"ctx._source['my-object'].remove('my-subfield')\"\n}" + }, + "UpdateRequestExample7": { + "description": "Run `POST test/_update/1` to change the operation that runs from within the script. For example, this request deletes the document if the `tags` field contains `green`, otherwise it does nothing (`noop`).\n", + "summary": "Change the operation with a script", + "value": "{\n \"script\": {\n \"source\": \"if (ctx._source.tags.contains(params.tag)) { ctx.op = 'delete' } else { ctx.op = 'noop' }\",\n \"lang\": \"painless\",\n \"params\": {\n \"tag\": \"green\"\n }\n }\n}" + }, + "UpdateRequestExample8": { + "description": "Run `POST test/_update/1` to do a partial update that adds a new field to the existing document.\n", + "summary": "Update part of a document", + "value": "{\n \"doc\": {\n \"name\": \"new_name\"\n }\n}" + }, + "UpdateRequestExample9": { + "description": "Run `POST test/_update/1` to perform an upsert. If the document does not already exist, the contents of the upsert element are inserted as a new document. 
If the document exists, the script is run.\n", + "summary": "Upsert", + "value": "{\n \"script\": {\n \"source\": \"ctx._source.counter += params.count\",\n \"lang\": \"painless\",\n \"params\": {\n \"count\": 4\n }\n },\n \"upsert\": {\n \"counter\": 1\n }\n}" + } + }, "generics": [ { "name": "TDocument", @@ -44076,6 +44458,28 @@ ] }, "description": "Update documents.\nUpdates documents that match the specified query.\nIf no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:\n\n* `read`\n* `index` or `write`\n\nYou can specify the query criteria in the request URI or the request body using the same syntax as the search API.\n\nWhen you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning.\nWhen the versions match, the document is updated and the version number is incremented.\nIf a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails.\nYou can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`.\nNote that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query.\n\nNOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number.\n\nWhile processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents.\nA bulk update request is performed for each batch of matching documents.\nAny query or update failures cause the update by query request to fail and the failures are shown in the response.\nAny update requests that completed successfully still stick, they are not rolled back.\n\n**Throttling update requests**\n\nTo control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number.\nThis pads each batch with a wait time to throttle the rate.\nSet `requests_per_second` to `-1` to turn off throttling.\n\nThrottling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account.\nThe padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is 1000, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nUpdate by query supports sliced scroll to parallelize the update process.\nThis can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nSetting `slices` to `auto` chooses a reasonable number for most 
data streams and indices.\nThis setting will use one slice per shard, up to a certain limit.\nIf there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.\n\nAdding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks:\n\n* You can see these requests in the tasks APIs. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with `slices` only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with slices will cancel each sub-request.\n* Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated.\n* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.\n\nIf you're slicing manually or otherwise tuning automatic slicing, keep in mind that:\n\n* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.\n* Update performance scales linearly across available resources with the number of slices.\n\nWhether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources.\n\n**Update the document source**\n\nUpdate by query supports scripts to update the document source.\nAs with the update API, you can set `ctx.op` to change the operation that is performed.\n\nSet `ctx.op = \"noop\"` if your script decides that it doesn't have to make any changes.\nThe update by query operation skips updating the document and increments the `noop` counter.\n\nSet `ctx.op = \"delete\"` if your script decides that the document should be deleted.\nThe update by query operation deletes the document and increments the `deleted` counter.\n\nUpdate by query supports only `index`, `noop`, and `delete`.\nSetting `ctx.op` to anything else is an error.\nSetting any other field in `ctx` is an error.\nThis API enables you to only modify the source of matching documents; you cannot move them.", + "examples": { + "UpdateByQueryRequestExample1": { + "description": "Run `POST my-index-000001/_update_by_query?conflicts=proceed` to update documents that match a query.\n", + "summary": "Update selected documents", + "value": "{\n \"query\": { \n \"term\": {\n \"user.id\": \"kimchy\"\n }\n }\n}" + }, + "UpdateByQueryRequestExample2": { + "description": "Run `POST my-index-000001/_update_by_query` with a script to update the document source. 
It increments the `count` field for all documents with a `user.id` of `kimchy` in `my-index-000001`.\n", + "summary": "Update the document source", + "value": "{\n \"script\": {\n \"source\": \"ctx._source.count++\",\n \"lang\": \"painless\"\n },\n \"query\": {\n \"term\": {\n \"user.id\": \"kimchy\"\n }\n }\n}" + }, + "UpdateByQueryRequestExample3": { + "description": "Run `POST my-index-000001/_update_by_query` to slice an update by query manually. Provide a slice ID and total number of slices to each request.\n", + "summary": "Slice manually", + "value": "{\n \"slice\": {\n \"id\": 0,\n \"max\": 2\n },\n \"script\": {\n \"source\": \"ctx._source['extra'] = 'test'\"\n }\n}" + }, + "UpdateByQueryRequestExample4": { + "description": "Run `POST my-index-000001/_update_by_query?refresh&slices=5` to use automatic slicing. It automatically parallelizes using sliced scroll to slice on `_id`.\n", + "summary": "Slice automatically", + "value": "{\n \"script\": {\n \"source\": \"ctx._source['extra'] = 'test'\"\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -90906,6 +91310,12 @@ ] }, "description": "Run an async search.\n\nWhen the primary sort of the results is an indexed field, shards get sorted based on minimum and maximum value that they hold for that field. Partial results become available following the sort criteria that was requested.\n\nWarning: Asynchronous search does not support scroll or search requests that include only the suggest section.\n\nBy default, Elasticsearch does not allow you to store an async search response larger than 10Mb and an attempt to do this results in an error.\nThe maximum allowed size for a stored async search response can be set by changing the `search.max_async_search_response_size` cluster level setting.", + "examples": { + "AsyncSearchSubmitRequestExample1": { + "description": "Perform a search request asynchronously with `POST /sales*/_async_search?size=0`. It accepts the same parameters and request body as the search API.\n", + "value": "{\n \"sort\": [\n { \"date\": { \"order\": \"asc\" } }\n ],\n \"aggs\": {\n \"sale_date\": {\n \"date_histogram\": {\n \"field\": \"date\",\n \"calendar_interval\": \"1d\"\n }\n }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -91891,6 +92301,13 @@ } ] }, + "examples": { + "GetAutoscalingCapacityResponseExample1": { + "description": "A successful response from `GET /_autoscaling/capacity`.", + "summary": "A successful response for retrieving the current autoscaling capacity.", + "value": "{\n \"policies\": {}\n}" + } + }, "name": { "name": "Response", "namespace": "autoscaling.get_autoscaling_capacity" @@ -91959,6 +92376,13 @@ } } }, + "examples": { + "GetAutoscalingPolicyResponseExample1": { + "description": "A successful response from `GET /_autoscaling/policy/my_autoscaling_policy`.", + "summary": "A successful response for retrieving an autoscaling policy.", + "value": "{\n \"roles\": <roles>,\n \"deciders\": <deciders>\n}" + } + }, "name": { "name": "Response", "namespace": "autoscaling.get_autoscaling_policy" @@ -91982,6 +92406,17 @@ } }, "description": "Create or update an autoscaling policy.\n\nNOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes.
Direct use is not supported.", + "examples": { + "PutAutoscalingPolicyRequestExample1": { + "summary": "Creates or updates an autoscaling policy.", + "value": "{\n \"roles\": [],\n \"deciders\": {\n \"fixed\": {\n }\n }\n}" + }, + "PutAutoscalingPolicyRequestExample2": { + "description": "Run `PUT /_autoscaling/policy/my_autoscaling_policy` to create `my_autoscaling_policy`. It uses the fixed autoscaling decider and applies only to the set of nodes that have the `data_hot` role.", + "summary": "Creates an autoscaling policy.", + "value": "{\n \"roles\" : [ \"data_hot\" ],\n \"deciders\": {\n \"fixed\": {\n }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -92048,6 +92483,12 @@ } } }, + "examples": { + "PutAutoscalingPolicyResponseExample1": { + "summary": "A successful response when creating an autoscaling policy.", + "value": "{\n \"acknowledged\": true\n}" + } + }, "name": { "name": "Response", "namespace": "autoscaling.put_autoscaling_policy" @@ -93484,6 +93925,12 @@ } } }, + "examples": { + "CatAliasesResponseExample1": { + "description": "A successful response from `GET _cat/aliases?v=true`. This response shows that `alias2` has configured a filter and `alias3` and `alias4` have routing configurations.\n", + "value": "alias index filter routing.index routing.search is_write_index\nalias1 test1 - - - -\nalias2 test1 * - - -\nalias3 test1 - 1 1 -\nalias4 test1 - 2 1,2 -" + } + }, "name": { "name": "Response", "namespace": "cat.aliases" @@ -93930,6 +94377,12 @@ } } }, + "examples": { + "CatAllocationResponseExample1": { + "description": "A successful response from `GET /_cat/allocation?v=true`. It shows a single shard is allocated to the one node available.\n", + "value": "shards shards.undesired write_load.forecast disk.indices.forecast disk.indices disk.used disk.avail disk.total disk.percent host ip node node.role\n 1 0 0.0 260b 260b 47.3gb 43.4gb 100.7gb 46 127.0.0.1 127.0.0.1 CSUXak2 himrst" + } + }, "name": { "name": "Response", "namespace": "cat.allocation" @@ -94114,6 +94567,12 @@ } } }, + "examples": { + "CatComponentTemplatesResponseExample1": { + "description": "A successful response from `GET _cat/component_templates/my-template-*?v=true&s=name`.\n", + "value": "name version alias_count mapping_count settings_count metadata_count included_in\nmy-template-1 0 0 1 0 [my-index-template]\nmy-template-2 0 3 0 0 [my-index-template]" + } + }, "name": { "name": "Response", "namespace": "cat.component_templates" @@ -94250,6 +94709,18 @@ } } }, + "examples": { + "CatCountResponseExample1": { + "description": "A successful response from `GET /_cat/count/my-index-000001?v=true`. It retrieves the document count for the `my-index-000001` data stream or index.\n", + "summary": "Single data stream or index count", + "value": "epoch timestamp count\n1475868259 15:24:20 120" + }, + "CatCountResponseExample2": { + "description": "A successful response from `GET /_cat/count?v=true`. It retrieves the document count for all data streams and indices in the cluster.\n", + "summary": "All data streams and indices count", + "value": "epoch timestamp count\n1475868259 15:24:20 121" + } + }, "name": { "name": "Response", "namespace": "cat.count" @@ -94424,6 +94895,18 @@ } } }, + "examples": { + "CatFielddataResponseExample1": { + "description": "A successful response from `GET /_cat/fielddata?v=true&fields=body`. You can specify an individual field in the request body or URL path.
This example retrieves heap memory size information for the `body` field.\n", + "summary": "Single field data", + "value": "id host ip node field size\nNqk-6inXQq-OxUfOUI8jNQ 127.0.0.1 127.0.0.1 Nqk-6in body 544b" + }, + "CatFielddataResponseExample2": { + "description": "A successful response from `GET /_cat/fielddata/body,soul?v=true`. You can specify a comma-separated list of fields in the request body or URL path. This example retrieves heap memory size information for the `body` and `soul` fields. To get information for all fields, run `GET /_cat/fielddata?v=true`.\n", + "summary": "Multiple fields data", + "value": "id host ip node field size\nNqk-6inXQq-OxUfOUI8jNQ 127.0.0.1 127.0.0.1 Nqk-6in body 544b\nNqk-6inXQq-OxUfOUI8jNQ 127.0.0.1 127.0.0.1 Nqk-6in soul 480b" + } + }, "name": { "name": "Response", "namespace": "cat.fielddata" @@ -94768,6 +95251,12 @@ } } }, + "examples": { + "CatHealthResponseExample1": { + "description": "A successful response from `GET /_cat/health?v=true`. By default, it returns `HH:MM:SS` and Unix epoch timestamps.\n", + "value": "epoch timestamp cluster status node.total node.data shards pri relo init unassign unassign.pri pending_tasks max_task_wait_time active_shards_percent\n1475871424 16:17:04 elasticsearch green 1 1 1 1 0 0 0 0 0 - 100.0%" + } + }, "name": { "name": "Response", "namespace": "cat.health" @@ -97022,6 +97511,12 @@ } } }, + "examples": { + "CatIndicesResponseExample1": { + "description": "A successful response from `GET /_cat/indices/my-index-*?v=true&s=index`.\n", + "value": "health status index uuid pri rep docs.count docs.deleted store.size pri.store.size dataset.size\nyellow open my-index-000001 u8FNjxh8Rfy_awN11oDKYQ 1 1 1200 0 88.1kb 88.1kb 88.1kb\ngreen open my-index-000002 nYFWZEO7TUiOjLQXBaYJpA 1 0 0 0 260b 260b 260b" + } + }, "name": { "name": "Response", "namespace": "cat.indices" @@ -97158,6 +97653,12 @@ } } }, + "examples": { + "CatMasterResponseExample1": { + "description": "A successful response from `GET /_cat/master?v=true`.\n", + "value": "id host ip node\nYzWoH_2BT-6UjVGDyPdqYg 127.0.0.1 127.0.0.1 YzWoH_2" + } + }, "name": { "name": "Response", "namespace": "cat.master" @@ -97535,6 +98036,12 @@ } } }, + "examples": { + "CatDataframeanalyticsResponseExample1": { + "description": "A successful response from `GET _cat/ml/data_frame/analytics?v=true`.", + "value": "id create_time type state\nclassifier_job_1 2020-02-12T11:49:09.594Z classification stopped\nclassifier_job_2 2020-02-12T11:49:14.479Z classification stopped\nclassifier_job_3 2020-02-12T11:49:16.928Z classification stopped\nclassifier_job_4 2020-02-12T11:49:19.127Z classification stopped\nclassifier_job_5 2020-02-12T11:49:21.349Z classification stopped" + } + }, "name": { "name": "Response", "namespace": "cat.ml_data_frame_analytics" @@ -97840,6 +98347,12 @@ } } }, + "examples": { + "CatDatafeedsResponseExample1": { + "description": "A successful response from `GET _cat/ml/datafeeds?v=true`.", + "value": "id state buckets.count search.count\ndatafeed-high_sum_total_sales stopped 743 7\ndatafeed-low_request_rate stopped 1457 3\ndatafeed-response_code_rates stopped 1460 18\ndatafeed-url_scanning stopped 1460 18" + } + }, "name": { "name": "Response", "namespace": "cat.ml_datafeeds" @@ -98923,6 +99436,12 @@ } } }, + "examples": { + "CatJobsResponseExample1": { + "description": "A successful response from `GET _cat/ml/anomaly_detectors?h=id,s,dpr,mb&v=true`.", + "value": "id s dpr mb\nhigh_sum_total_sales closed 14022 1.5mb\nlow_request_rate closed 1216 40.5kb\nresponse_code_rates closed 28146 132.7kb\nurl_scanning closed 28146 501.6kb" + } + }, "name": { "name": "Response", "namespace": "cat.ml_jobs" @@ -99067,6 +99586,12 @@ } } }, + "examples": { + "CatTrainedModelsResponseExample1": { + "description": "A successful response from `GET _cat/ml/trained_models?h=c,o,l,ct,v&v=true`.", + "value": "id created_by operations license create_time version\nddddd-1580216177138 _xpack 196 PLATINUM 2020-01-28T12:56:17.138Z 8.0.0\nflight-regress-1580215685537 _xpack 102 PLATINUM 2020-01-28T12:48:05.537Z 8.0.0\nlang_ident_model_1 _xpack 39629 BASIC 2019-12-05T12:28:34.594Z 7.6.0" + } + }, "name": { "name": "Response", "namespace": "cat.ml_trained_models" @@ -99542,6 +100067,18 @@ } } }, + "examples": { + "CatNodeAttributesResponseExample1": { + "description": "A successful response from `GET /_cat/nodeattrs?v=true`. The `node`, `host`, and `ip` columns provide basic information about each node. The `attr` and `value` columns return custom node attributes, one per line.\n", + "summary": "Default columns", + "value": "node host ip attr value\nnode-0 127.0.0.1 127.0.0.1 testattr test" + }, + "CatNodeAttributesResponseExample2": { + "description": "A successful response from `GET /_cat/nodeattrs?v=true&h=name,pid,attr,value`. It returns the `name`, `pid`, `attr`, and `value` columns.\n", + "summary": "Explicit columns", + "value": "name pid attr value\nnode-0 19566 testattr test" + } + }, "name": { "name": "Response", "namespace": "cat.nodeattrs" @@ -101178,6 +101715,18 @@ } } }, + "examples": { + "CatNodesResponseExample1": { + "description": "A successful response from `GET /_cat/nodes?v=true`. The `ip`, `heap.percent`, `ram.percent`, `cpu`, and `load_*` columns provide the IP addresses and performance information of each node. The `node.role`, `master`, and `name` columns provide information useful for monitoring an entire cluster, particularly large ones.\n", + "summary": "Default columns", + "value": "ip heap.percent ram.percent cpu load_1m load_5m load_15m node.role master name\n127.0.0.1 65 99 42 3.07 dim * mJw06l1" + }, + "CatNodesResponseExample2": { + "description": "A successful response from `GET /_cat/nodes?v=true&h=id,ip,port,v,m`.
It returns the `id`, `ip`, `port`, `v` (version), and `m` (master) columns.\n", + "summary": "Explicit columns", + "value": "id ip port v m\nveJR 127.0.0.1 59938 8.17.0 *" + } + }, "name": { "name": "Response", "namespace": "cat.nodes" @@ -101332,6 +101881,12 @@ } } }, + "examples": { + "CatPendingTasksResponseExample1": { + "description": "A successful response from `GET /_cat/pending_tasks?v=true`.\n", + "value": "insertOrder timeInQueue priority source\n 1685 855ms HIGH update-mapping [foo][t]\n 1686 843ms HIGH update-mapping [foo][t]\n 1693 753ms HIGH refresh-mapping [foo][[t]]\n 1688 816ms HIGH update-mapping [foo][t]\n 1689 802ms HIGH update-mapping [foo][t]\n 1690 787ms HIGH update-mapping [foo][t]\n 1691 773ms HIGH update-mapping [foo][t]" + } + }, "name": { "name": "Response", "namespace": "cat.pending_tasks" @@ -101514,6 +102069,12 @@ } } }, + "examples": { + "CatPluginsResponseExample1": { + "description": "A successful response from `GET /_cat/plugins?v=true&s=component&h=name,component,version,description`.\n", + "value": "name component version description\nU7321H6 analysis-icu 8.17.0 The ICU Analysis plugin integrates the Lucene ICU module into Elasticsearch, adding ICU-related analysis components.\nU7321H6 analysis-kuromoji 8.17.0 The Japanese (kuromoji) Analysis plugin integrates Lucene kuromoji analysis module into elasticsearch.\nU7321H6 analysis-nori 8.17.0 The Korean (nori) Analysis plugin integrates Lucene nori analysis module into elasticsearch.\nU7321H6 analysis-phonetic 8.17.0 The Phonetic Analysis plugin integrates phonetic token filter analysis with elasticsearch.\nU7321H6 analysis-smartcn 8.17.0 Smart Chinese Analysis plugin integrates Lucene Smart Chinese analysis module into elasticsearch.\nU7321H6 analysis-stempel 8.17.0 The Stempel (Polish) Analysis plugin integrates Lucene stempel (polish) analysis module into elasticsearch.\nU7321H6 analysis-ukrainian 8.17.0 The Ukrainian Analysis plugin integrates the Lucene UkrainianMorfologikAnalyzer into elasticsearch.\nU7321H6 discovery-azure-classic 8.17.0 The Azure Classic Discovery plugin allows to use Azure Classic API for the unicast discovery mechanism\nU7321H6 discovery-ec2 8.17.0 The EC2 discovery plugin allows to use AWS API for the unicast discovery mechanism.\nU7321H6 discovery-gce 8.17.0 The Google Compute Engine (GCE) Discovery plugin allows to use GCE API for the unicast discovery mechanism.\nU7321H6 mapper-annotated-text 8.17.0 The Mapper Annotated_text plugin adds support for text fields with markup used to inject annotation tokens into the index.\nU7321H6 mapper-murmur3 8.17.0 The Mapper Murmur3 plugin allows to compute hashes of a field's values at index-time and to store them in the index.\nU7321H6 mapper-size 8.17.0 The Mapper Size plugin allows document to record their uncompressed size at index time.\nU7321H6 store-smb 8.17.0 The Store SMB plugin adds support for SMB stores." + } + }, "name": { "name": "Response", "namespace": "cat.plugins" @@ -102044,6 +102605,23 @@ } } }, + "examples": { + "CatRecoveryResponseExample1": { + "description": "A successful response from `GET _cat/recovery?v=true`. 
In this example, the source and target nodes are the same because the recovery type is `store`, meaning they were read from local storage on node start.\n", + "summary": "No ongoing recoveries", + "value": "index shard time type stage source_host source_node target_host target_node repository snapshot files files_recovered files_percent files_total bytes bytes_recovered bytes_percent bytes_total translog_ops translog_ops_recovered translog_ops_percent\nmy-index-000001 0 13ms store done n/a n/a 127.0.0.1 node-0 n/a n/a 0 0 100% 13 0b 0b 100% 9928b 0 0 100.0%" + }, + "CatRecoveryResponseExample2": { + "description": "A successful response from `GET _cat/recovery?v=true&h=i,s,t,ty,st,shost,thost,f,fp,b,bp`. You can retrieve information about an ongoing recovery, for example, when you increase the replica count of an index and bring another node online to host the replicas. In this example, the recovery type is `peer`, meaning the shard recovered from another node. The `files` and `bytes` are real-time measurements.\n", + "summary": "A live shard recovery", + "value": "i s t ty st shost thost f fp b bp\nmy-index-000001 0 1252ms peer done 192.168.1.1 192.168.1.2 0 100.0% 0b 100.0%" + }, + "CatRecoveryResponseExample3": { + "description": "A successful response from `GET _cat/recovery?v=true&h=i,s,t,ty,st,rep,snap,f,fp,b,bp`. You can restore backups of an index using the snapshot and restore API. You can use the cat recovery API to get information about a snapshot recovery.\n", + "summary": "A snapshot recovery", + "value": "i s t ty st rep snap f fp b bp\nmy-index-000001 0 1978ms snapshot done my-repo snap-1 79 8.0% 12086 9.0%" + } + }, "name": { "name": "Response", "namespace": "cat.recovery" @@ -102156,6 +102734,12 @@ } } }, + "examples": { + "CatRepositoriesResponseExample1": { + "description": "A successful response from `GET /_cat/repositories?v=true`.\n", + "value": "id type\nrepo1 fs\nrepo2 s3" + } + }, "name": { "name": "Response", "namespace": "cat.repositories" @@ -102253,6 +102837,12 @@ } } }, + "examples": { + "CatSegmentsResponseExample1": { + "description": "A successful response from `GET /_cat/segments?v=true`.\n", + "value": "index shard prirep ip segment generation docs.count docs.deleted size size.memory committed searchable version compound\ntest 0 p 127.0.0.1 _0 0 1 0 3kb 0 false true 9.12.0 true\ntest1 0 p 127.0.0.1 _0 0 1 0 3kb 0 false true 9.12.0 true" + } + }, "name": { "name": "Response", "namespace": "cat.segments" @@ -102589,6 +103179,33 @@ } } }, + "examples": { + "CatShardsResponseExample1": { + "description": "A successful response from `GET _cat/shards`.\n", + "summary": "A single data stream or index", + "value": "my-index-000001 0 p STARTED 3014 31.1mb 192.168.56.10 H5dfFeA" + }, + "CatShardsResponseExample2": { + "description": "A successful response from `GET _cat/shards/my-index-*`. It returns information for any data streams or indices beginning with `my-index-`.\n", + "summary": "A wildcard pattern", + "value": "my-index-000001 0 p STARTED 3014 31.1mb 192.168.56.10 H5dfFeA" + }, + "CatShardsResponseExample3": { + "description": "A successful response from `GET _cat/shards`. The `RELOCATING` value in the `state` column indicates the index shard is relocating.\n", + "summary": "A relocating shard", + "value": "my-index-000001 0 p RELOCATING 3014 31.1mb 192.168.56.10 H5dfFeA -> -> 192.168.56.30 bGG90GE" + }, + "CatShardsResponseExample4": { + "description": "A successful response from `GET _cat/shards`.
Before a shard is available for use, it goes through an `INITIALIZING` state. You can use the cat shards API to see which shards are initializing.\n", + "summary": "Shard states", + "value": "my-index-000001 0 p STARTED 3014 31.1mb 192.168.56.10 H5dfFeA\nmy-index-000001 0 r INITIALIZING 0 14.3mb 192.168.56.30 bGG90GE" + }, + "CatShardsResponseExample5": { + "description": "A successful response from `GET _cat/shards?h=index,shard,prirep,state,unassigned.reason`. It includes the `unassigned.reason` column, which indicates why a shard is unassigned.\n", + "summary": "Reasons for unassigned shards", + "value": "my-index-000001 0 p STARTED 3014 31.1mb 192.168.56.10 H5dfFeA\nmy-index-000001 0 r STARTED 3014 31.1mb 192.168.56.30 bGG90GE\nmy-index-000001 0 r STARTED 3014 31.1mb 192.168.56.20 I8hydUG\nmy-index-000001 0 r UNASSIGNED ALLOCATION_FAILED" + } + }, "name": { "name": "Response", "namespace": "cat.shards" @@ -103967,6 +104584,12 @@ } } }, + "examples": { + "CatSnapshotsResponseExample1": { + "description": "A successful response from `GET /_cat/snapshots/repo1?v=true&s=id`.\n", + "value": "id repository status start_epoch start_time end_epoch end_time duration indices successful_shards failed_shards total_shards\nsnap1 repo1 FAILED 1445616705 18:11:45 1445616978 18:16:18 4.6m 1 4 1 5\nsnap2 repo1 SUCCESS 1445634298 23:04:58 1445634672 23:11:12 6.2m 2 10 0 10" + } + }, "name": { "name": "Response", "namespace": "cat.snapshots" @@ -104352,6 +104975,12 @@ } } }, + "examples": { + "CatTasksResponseExample1": { + "description": "A successful response from `GET _cat/tasks?v=true`.", + "value": "action task_id parent_task_id type start_time timestamp running_time ip node\ncluster:monitor/tasks/lists[n] oTUltX4IQMOUUVeiohTt8A:124 oTUltX4IQMOUUVeiohTt8A:123 direct 1458585884904 01:48:24 44.1micros 127.0.0.1:9300 oTUltX4IQMOUUVeiohTt8A\ncluster:monitor/tasks/lists oTUltX4IQMOUUVeiohTt8A:123 - transport 1458585884904 01:48:24 186.2micros 127.0.0.1:9300 oTUltX4IQMOUUVeiohTt8A" + } + }, "name": { "name": "Response", "namespace": "cat.tasks" @@ -104683,6 +105312,12 @@ } } }, + "examples": { + "CatTemplatesResponseExample1": { + "description": "A successful response from `GET _cat/templates/my-template-*?v=true&s=name`.\n", + "value": "name index_patterns order version composed_of\nmy-template-0 [te*] 500 []\nmy-template-1 [tea*] 501 []\nmy-template-2 [teak*] 502 7 []" + } + }, "name": { "name": "Response", "namespace": "cat.templates" @@ -104878,6 +105513,18 @@ } } }, + "examples": { + "CatThreadPoolResponseExample1": { + "description": "A successful response from `GET /_cat/thread_pool`.\n", + "summary": "Default columns", + "value": "node-0 analyze 0 0 0\nnode-0 fetch_shard_started 0 0 0\nnode-0 fetch_shard_store 0 0 0\nnode-0 flush 0 0 0\nnode-0 write 0 0 0" + }, + "CatThreadPoolResponseExample2": { + "description": "A successful response from `GET /_cat/thread_pool/generic?v=true&h=id,name,active,rejected,completed`. It returns the `id`, `name`, `active`, `rejected`, and `completed` columns. 
It also limits returned information to the generic thread pool.\n", + "summary": "Explicit columns", + "value": "id name active rejected completed\n0EWUhXeBQtaVGlexUeVwMg generic 0 0 70" + } + }, "name": { "name": "Response", "namespace": "cat.thread_pool" @@ -105371,6 +106018,12 @@ } } }, + "examples": { + "CatTransformsResponseExample1": { + "description": "A successful response from `GET /_cat/transforms?v=true&format=json`.", + "value": "[\n {\n \"id\" : \"ecommerce_transform\",\n \"state\" : \"started\",\n \"checkpoint\" : \"1\",\n \"documents_processed\" : \"705\",\n \"checkpoint_progress\" : \"100.00\",\n \"changes_last_detection_time\" : null\n }\n]" + } + }, "name": { "name": "Response", "namespace": "cat.transforms" @@ -106692,6 +107345,12 @@ ] }, "description": "Create a follower.\nCreate a cross-cluster replication follower index that follows a specific leader index.\nWhen the API returns, the follower index exists and cross-cluster replication starts replicating operations from the leader index to the follower index.", + "examples": { + "CreateFollowIndexRequestExample1": { + "description": "Run `PUT /follower_index/_ccr/follow?wait_for_active_shards=1` to create a follower index named `follower_index`.", + "value": "{\n \"remote_cluster\" : \"remote_cluster\",\n \"leader_index\" : \"leader_index\",\n \"settings\": {\n \"index.number_of_replicas\": 0\n },\n \"max_read_request_operation_count\" : 1024,\n \"max_outstanding_read_requests\" : 16,\n \"max_read_request_size\" : \"1024k\",\n \"max_write_request_operation_count\" : 32768,\n \"max_write_request_size\" : \"16k\",\n \"max_outstanding_write_requests\" : 8,\n \"max_write_buffer_count\" : 512,\n \"max_write_buffer_size\" : \"512k\",\n \"max_retry_delay\" : \"10s\",\n \"read_poll_timeout\" : \"30s\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -107217,6 +107876,12 @@ ] }, "description": "Forget a follower.\nRemove the cross-cluster replication follower retention leases from the leader.\n\nA following index takes out retention leases on its leader index.\nThese leases are used to increase the likelihood that the shards of the leader index retain the history of operations that the shards of the following index need to run replication.\nWhen a follower index is converted to a regular index by the unfollow API (either by directly calling the API or by index lifecycle management tasks), these leases are removed.\nHowever, removal of the leases can fail, for example when the remote cluster containing the leader index is unavailable.\nWhile the leases will eventually expire on their own, their extended existence can cause the leader index to hold more history than necessary and prevent index lifecycle management from performing some operations on the leader index.\nThis API exists to enable manually removing the leases when the unfollow API is unable to do so.\n\nNOTE: This API does not stop replication by a following index. 
If you use this API with a follower index that is still actively following, the following index will add back retention leases on the leader.\nThe only purpose of this API is to handle the case of failure to remove the following retention leases after the unfollow API is invoked.", + "examples": { + "ForgetFollowerIndexRequestExample1": { + "description": "Run `POST /<leader_index>/_ccr/forget_follower`.", + "value": "{\n \"follower_cluster\" : \"<follower_cluster>\",\n \"follower_index\" : \"<follower_index>\",\n \"follower_index_uuid\" : \"<follower_index_uuid>\",\n \"leader_remote_cluster\" : \"<leader_remote_cluster>\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -107824,6 +108489,12 @@ ] }, "description": "Create or update auto-follow patterns.\nCreate a collection of cross-cluster replication auto-follow patterns for a remote cluster.\nNewly created indices on the remote cluster that match any of the patterns are automatically configured as follower indices.\nIndices on the remote cluster that were created before the auto-follow pattern was created will not be auto-followed even if they match the pattern.\n\nThis API can also be used to update auto-follow patterns.\nNOTE: Follower indices that were configured automatically before updating an auto-follow pattern will remain unchanged even if they do not match against the new patterns.", + "examples": { + "PutAutoFollowPatternRequestExample1": { + "description": "Run `PUT /_ccr/auto_follow/my_auto_follow_pattern` to create an auto-follow pattern.\n", + "value": "{\n \"remote_cluster\" : \"remote_cluster\",\n \"leader_index_patterns\" :\n [\n \"leader_index*\"\n ],\n \"follow_index_pattern\" : \"{{leader_index}}-follower\",\n \"settings\": {\n \"index.number_of_replicas\": 0\n },\n \"max_read_request_operation_count\" : 1024,\n \"max_outstanding_read_requests\" : 16,\n \"max_read_request_size\" : \"1024k\",\n \"max_write_request_operation_count\" : 32768,\n \"max_write_request_size\" : \"16k\",\n \"max_outstanding_write_requests\" : 8,\n \"max_write_buffer_count\" : 512,\n \"max_write_buffer_size\" : \"512k\",\n \"max_retry_delay\" : \"10s\",\n \"read_poll_timeout\" : \"30s\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -108072,6 +108743,12 @@ ] }, "description": "Resume a follower.\nResume a cross-cluster replication follower index that was paused.\nThe follower index could have been paused with the pause follower API.\nAlternatively it could be paused due to replication that cannot be retried due to failures during following tasks.\nWhen this API returns, the follower index will resume fetching operations from the leader index.", + "examples": { + "ResumeFollowIndexRequestExample1": { + "description": "Run `POST /follower_index/_ccr/resume_follow` to resume the follower index.", + "value": "{\n \"max_read_request_operation_count\" : 1024,\n \"max_outstanding_read_requests\" : 16,\n \"max_read_request_size\" : \"1024k\",\n \"max_write_request_operation_count\" : 32768,\n \"max_write_request_size\" : \"16k\",\n \"max_outstanding_write_requests\" : 8,\n \"max_write_buffer_count\" : 512,\n \"max_write_buffer_size\" : \"512k\",\n \"max_retry_delay\" : \"10s\",\n \"read_poll_timeout\" : \"30s\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -109323,6 +110000,12 @@ ] }, "description": "Explain the shard allocations.\nGet explanations for shard allocations in the cluster.\nFor unassigned shards, it provides an explanation for why the shard is unassigned.\nFor assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or
rebalanced to another node.\nThis API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise.", + "examples": { + "ClusterAllocationExplainRequestExample1": { + "description": "Run `GET _cluster/allocation/explain` to get an explanation for a shard's current allocation.", + "value": "{\n \"index\": \"my-index-000001\",\n \"shard\": 0,\n \"primary\": false,\n \"current_node\": \"my-node\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -111502,6 +112185,17 @@ ] }, "description": "Create or update a component template.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nAn index template can be composed of multiple component templates.\nTo use a component template, specify it in an index template’s `composed_of` list.\nComponent templates are only applied to new data streams and indices as part of a matching index template.\n\nSettings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template.\n\nComponent templates are only used during index creation.\nFor data streams, this includes data stream creation and the creation of a stream’s backing indices.\nChanges to component templates do not affect existing indices, including a stream’s backing indices.\n\nYou can use C-style `/* *\/` block comments in component templates.\nYou can include comments anywhere in the request body except before the opening curly bracket.\n\n**Applying component templates**\n\nYou cannot directly apply a component template to a data stream or index.\nTo be applied, a component template must be included in an index template's `composed_of` list.", + "examples": { + "ClusterPutComponentTemplateRequestExample1": { + "summary": "Create a template", + "value": "{\n \"template\": {\n \"settings\": {\n \"number_of_shards\": 1\n },\n \"mappings\": {\n \"_source\": {\n \"enabled\": false\n },\n \"properties\": {\n \"host_name\": {\n \"type\": \"keyword\"\n },\n \"created_at\": {\n \"type\": \"date\",\n \"format\": \"EEE MMM dd HH:mm:ss Z yyyy\"\n }\n }\n }\n }\n}" + }, + "ClusterPutComponentTemplateRequestExample2": { + "description": "You can include index aliases in a component template.
During index creation, the `{index}` placeholder in the alias name will be replaced with the actual index name that the template gets applied to.\n", + "summary": "Create a template with aliases", + "value": "{\n \"template\": {\n \"settings\": {\n \"number_of_shards\": 1\n },\n \"aliases\": {\n \"alias1\": {},\n \"alias2\": {\n \"filter\": {\n \"term\": {\n \"user.id\": \"kimchy\"\n }\n },\n \"routing\": \"shard-1\"\n },\n \"{index}-alias\": {}\n }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -111621,6 +112315,18 @@ ] }, "description": "Update the cluster settings.\n\nConfigure and update dynamic settings on a running cluster.\nYou can also configure dynamic settings locally on an unstarted or shut down node in `elasticsearch.yml`.\n\nUpdates made with this API can be persistent, which apply across cluster restarts, or transient, which reset after a cluster restart.\nYou can also reset transient or persistent settings by assigning them a null value.\n\nIf you configure the same setting using multiple methods, Elasticsearch applies the settings in the following order of precedence: 1) Transient setting; 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value.\nFor example, you can apply a transient setting to override a persistent setting or `elasticsearch.yml` setting.\nHowever, a change to an `elasticsearch.yml` setting will not override a defined transient or persistent setting.\n\nTIP: In Elastic Cloud, use the user settings feature to configure all cluster settings. This method automatically rejects unsafe settings that could break your cluster.\nIf you run Elasticsearch on your own hardware, use this API to configure dynamic cluster settings.\nOnly use `elasticsearch.yml` for static cluster settings and node settings.\nThe API doesn’t require a restart and ensures a setting’s value is the same on all nodes.\n\nWARNING: Transient cluster settings are no longer recommended. Use persistent cluster settings instead.\nIf a cluster becomes unstable, transient settings can clear unexpectedly, resulting in a potentially undesired cluster configuration.", + "examples": { + "ClusterPutSettingsRequestExample1": { + "description": "An example of a persistent update.", + "summary": "A simple setting", + "value": "{\n \"persistent\" : {\n \"indices.recovery.max_bytes_per_sec\" : \"50mb\"\n }\n}" + }, + "ClusterPutSettingsRequestExample2": { + "description": "Run `PUT /_cluster/settings` to update the `action.auto_create_index` setting. The setting accepts a comma-separated list of patterns that you want to allow, or you can prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. In this example, the auto-creation of indices called `my-index-000001` or `index10` is allowed, the creation of indices that match the pattern `index1*` is blocked, and the creation of any other indices that match the `ind*` pattern is allowed.
Patterns are matched in the order specified.\n", + "summary": "A setting with multiple patterns", + "value": "{\n \"persistent\": {\n \"action.auto_create_index\": \"my-index-000001,index10,-index1*,+ind*\"\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -112333,6 +113039,12 @@ ] }, "description": "Reroute the cluster.\nManually change the allocation of individual shards in the cluster.\nFor example, a shard can be moved from one node to another explicitly, an allocation can be canceled, and an unassigned shard can be explicitly allocated to a specific node.\n\nIt is important to note that after processing any reroute commands Elasticsearch will perform rebalancing as normal (respecting the values of settings such as `cluster.routing.rebalance.enable`) in order to remain in a balanced state.\nFor example, if the requested allocation includes moving a shard from node1 to node2 then this may cause a shard to be moved from node2 back to node1 to even things out.\n\nThe cluster can be set to disable allocations using the `cluster.routing.allocation.enable` setting.\nIf allocations are disabled then the only allocations that will be performed are explicit ones given using the reroute command, and consequent allocations due to rebalancing.\n\nThe cluster will attempt to allocate a shard a maximum of `index.allocation.max_retries` times in a row (defaults to `5`), before giving up and leaving the shard unallocated.\nThis scenario can be caused by structural problems such as having an analyzer which refers to a stopwords file which doesn’t exist on all nodes.\n\nOnce the problem has been corrected, allocation can be manually retried by calling the reroute API with the `?retry_failed` URI query parameter, which will attempt a single retry round for these shards.", + "examples": { + "ClusterRerouteRequestExample1": { + "description": "Run `POST /_cluster/reroute?metric=none` to change the allocation of shards in a cluster.", + "value": "{\n \"commands\": [\n {\n \"move\": {\n \"index\": \"test\", \"shard\": 0,\n \"from_node\": \"node1\", \"to_node\": \"node2\"\n }\n },\n {\n \"allocate_replica\": {\n \"index\": \"test\", \"shard\": 1,\n \"node\": \"node3\"\n }\n }\n ]\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -117609,6 +118321,11 @@ ] }, "description": "Update the connector last sync stats.\n\nUpdate the fields related to the last sync of a connector.\nThis action is used for analytics and monitoring.", + "examples": { + "ConnectorUpdateLastSyncRequestExample1": { + "value": "{\n \"last_access_control_sync_error\": \"Houston, we have a problem!\",\n \"last_access_control_sync_scheduled_at\": \"2023-11-09T15:13:08.231Z\",\n \"last_access_control_sync_status\": \"pending\",\n \"last_deleted_document_count\": 42,\n \"last_incremental_sync_scheduled_at\": \"2023-11-09T15:13:08.231Z\",\n \"last_indexed_document_count\": 42,\n \"last_sync_error\": \"Houston, we have a problem!\",\n \"last_sync_scheduled_at\": \"2024-11-09T15:13:08.231Z\",\n \"last_sync_status\": \"completed\",\n \"last_synced\": \"2024-11-09T15:13:08.231Z\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -118010,6 +118727,14 @@ ] }, "description": "Create or update a connector.", + "examples": { + "ConnectorPutRequestExample1": { + "value": "{\n \"index_name\": \"search-google-drive\",\n \"name\": \"My Connector\",\n \"service_type\": \"google_drive\"\n}" + }, + "ConnectorPutRequestExample2": { + "value": "{\n \"index_name\": \"search-google-drive\",\n \"name\": \"My Connector\",\n
\"description\": \"My Connector to sync data to Elastic index from Google Drive\",\n \"service_type\": \"google_drive\",\n \"language\": \"english\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -118327,6 +119052,11 @@ ] }, "description": "Set a connector sync job error.\nSet the `error` field for a connector sync job and set its `status` to `error`.\n\nTo sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure.\nThis service runs automatically on Elastic Cloud for Elastic managed connectors.", + "examples": { + "SyncJobErrorRequestExample1": { + "value": "{\n \"error\": \"some-error\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -118602,6 +119332,11 @@ ] }, "description": "Create a connector sync job.\n\nCreate a connector sync job document in the internal index and initialize its counters and timestamps with default values.", + "examples": { + "SyncJobPostRequestExample1": { + "value": "{\n \"id\": \"connector-id\",\n \"job_type\": \"full\",\n \"trigger_method\": \"on_demand\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -118855,6 +119590,11 @@ ] }, "description": "Update the connector API key ID.\n\nUpdate the `api_key_id` and `api_key_secret_id` fields of a connector.\nYou can specify the ID of the API key used for authorization and the ID of the connector secret where the API key is stored.\nThe connector secret ID is required only for Elastic managed (native) connectors.\nSelf-managed connectors (connector clients) do not use this field.", + "examples": { + "ConnectorUpdateApiKeyIDRequestExample1": { + "value": "{\n \"api_key_id\": \"my-api-key-id\",\n \"api_key_secret_id\": \"my-connector-secret-id\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -118946,6 +119686,14 @@ ] }, "description": "Update the connector configuration.\n\nUpdate the configuration field in the connector document.", + "examples": { + "ConnectorUpdateConfigurationRequestExample1": { + "value": "{\n \"values\": {\n \"tenant_id\": \"my-tenant-id\",\n \"tenant_name\": \"my-sharepoint-site\",\n \"client_id\": \"foo\",\n \"secret_value\": \"bar\",\n \"site_collections\": \"*\"\n }\n}" + }, + "ConnectorUpdateConfigurationRequestExample2": { + "value": "{\n \"values\": {\n \"secret_value\": \"foo-bar\"\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -119028,6 +119776,11 @@ ] }, "description": "Update the connector error field.\n\nSet the error field for the connector.\nIf the error provided in the request body is non-null, the connector’s status is updated to error.\nOtherwise, if the error is reset to null, the connector status is updated to connected.", + "examples": { + "ConnectorUpdateErrorRequestExample1": { + "value": "{\n \"error\": \"Houston, we have a problem!\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -119101,6 +119854,14 @@ ] }, "description": "Update the connector features.\nUpdate the connector features in the connector document.\nThis API can be used to control the following aspects of a connector:\n\n* document-level security\n* incremental syncs\n* advanced sync rules\n* basic sync rules\n\nNormally, the running connector service automatically manages these features.\nHowever, you can use this API to override the default behavior.\n\nTo sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure.\nThis service runs automatically on Elastic Cloud for Elastic managed connectors.", 
+ "examples": { + "ConnectorUpdateFeaturesRequestExample1": { + "value": "{\n \"features\": {\n \"document_level_security\": {\n \"enabled\": true\n },\n \"incremental_sync\": {\n \"enabled\": true\n },\n \"sync_rules\": {\n \"advanced\": {\n \"enabled\": false\n },\n \"basic\": {\n \"enabled\": true\n }\n }\n }\n}" + }, + "ConnectorUpdateFeaturesRequestExample2": { + "value": "{\n \"features\": {\n \"document_level_security\": {\n \"enabled\": true\n }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -119202,6 +119963,14 @@ ] }, "description": "Update the connector filtering.\n\nUpdate the draft filtering configuration of a connector and marks the draft validation state as edited.\nThe filtering draft is activated once validated by the running Elastic connector service.\nThe filtering property is used to configure sync rules (both basic and advanced) for a connector.", + "examples": { + "ConnectorUpdateFilteringRequestExample1": { + "value": "{\n \"rules\": [\n {\n \"field\": \"file_extension\",\n \"id\": \"exclude-txt-files\",\n \"order\": 0,\n \"policy\": \"exclude\",\n \"rule\": \"equals\",\n \"value\": \"txt\"\n },\n {\n \"field\": \"_\",\n \"id\": \"DEFAULT\",\n \"order\": 1,\n \"policy\": \"include\",\n \"rule\": \"regex\",\n \"value\": \".*\"\n }\n ]\n}" + }, + "ConnectorUpdateFilteringRequestExample2": { + "value": "{\n \"advanced_snippet\": {\n \"value\": [{\n \"tables\": [\n \"users\",\n \"orders\"\n ],\n \"query\": \"SELECT users.id AS id, orders.order_id AS order_id FROM users JOIN orders ON users.id = orders.user_id\"\n }]\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -119357,6 +120126,11 @@ ] }, "description": "Update the connector index name.\n\nUpdate the `index_name` field of a connector, specifying the index where the data ingested by the connector is stored.", + "examples": { + "ConnectorUpdateIndexNameRequestExample1": { + "value": "{\n \"index_name\": \"data-from-my-google-drive\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -119441,6 +120215,11 @@ ] }, "description": "Update the connector name and description.", + "examples": { + "ConnectorUpdateNameRequestExample1": { + "value": "{\n \"name\": \"Custom connector\",\n \"description\": \"This is my customized connector\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -119587,6 +120366,11 @@ ] }, "description": "Update the connector pipeline.\n\nWhen you create a new connector, the configuration of an ingest pipeline is populated with default settings.", + "examples": { + "ConnectorUpdatePipelineRequestExample1": { + "value": "{\n \"pipeline\": {\n \"extract_binary_content\": true,\n \"name\": \"my-connector-pipeline\",\n \"reduce_whitespace\": true,\n \"run_ml_inference\": true\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -119660,6 +120444,14 @@ ] }, "description": "Update the connector scheduling.", + "examples": { + "ConnectorUpdateSchedulingRequestExample1": { + "value": "{\n \"scheduling\": {\n \"access_control\": {\n \"enabled\": true,\n \"interval\": \"0 10 0 * * ?\"\n },\n \"full\": {\n \"enabled\": true,\n \"interval\": \"0 20 0 * * ?\"\n },\n \"incremental\": {\n \"enabled\": false,\n \"interval\": \"0 30 0 * * ?\"\n }\n }\n}" + }, + "ConnectorUpdateSchedulingRequestExample2": { + "value": "{\n \"scheduling\": {\n \"full\": {\n \"enabled\": true,\n \"interval\": \"0 10 0 * * ?\"\n }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -119733,6 +120525,11 @@ ] }, "description": "Update the connector 
service type.", + "examples": { + "ConnectorUpdateServiceTypeRequestExample1": { + "value": "{\n \"service_type\": \"sharepoint_online\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -119806,6 +120603,11 @@ ] }, "description": "Update the connector status.", + "examples": { + "ConnectorUpdateStatusRequestExample1": { + "value": "{\n \"status\": \"needs_configuration\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -121868,6 +122670,18 @@ ] }, "description": "Get EQL search results.\nReturns search results for an Event Query Language (EQL) query.\nEQL assumes each document in a data stream or index corresponds to an event.", + "examples": { + "EqlSearchRequestExample1": { + "description": "Run `GET /my-data-stream/_eql/search` to search for events that have a `process.name` of `cmd.exe` and a `process.pid` other than `2013`.\n", + "summary": "Basic query", + "value": "{\n \"query\": \"\"\"\n process where (process.name == \"cmd.exe\" and process.pid != 2013)\n \"\"\"\n}" + }, + "EqlSearchRequestExample2": { + "description": "Run `GET /my-data-stream/_eql/search` to search for a sequence of events. The sequence starts with an event with an `event.category` of `file`, a `file.name` of `cmd.exe`, and a `process.pid` other than `2013`. It is followed by an event with an `event.category` of `process` and a `process.executable` that contains the substring `regsvr32`. These events must also share the same `process.pid` value.\n", + "summary": "Sequence query", + "value": "{\n \"query\": \"\"\"\n sequence by process.pid\n [ file where file.name == \"cmd.exe\" and process.pid != 2013 ]\n [ process where stringContains(process.executable, \"regsvr32\") ]\n \"\"\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -122392,6 +123206,11 @@ ] }, "description": "Run an async ES|QL query.\nAsynchronously run an ES|QL (Elasticsearch query language) query, monitor its progress, and retrieve results when they become available.\n\nThe API accepts the same parameters and request body as the synchronous query API, along with additional async related properties.", + "examples": { + "AsyncQueryRequestExample1": { + "value": "{\n \"query\": \"\"\"\n FROM library\n | EVAL year = DATE_TRUNC(1 YEARS, release_date)\n | STATS MAX(page_count) BY year\n | SORT year\n | LIMIT 5\n \"\"\",\n \"wait_for_completion_timeout\": \"2s\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -122836,6 +123655,12 @@ ] }, "description": "Run an ES|QL query.\nGet search results for an ES|QL (Elasticsearch query language) query.", + "examples": { + "QueryRequestExample1": { + "description": "Run `POST /_query` to get results for an ES|QL query.", + "value": "{\n \"query\": \"\"\"\n FROM library\n | EVAL year = DATE_TRUNC(1 YEARS, release_date)\n | STATS MAX(page_count) BY year\n | SORT year\n | LIMIT 5\n \"\"\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -125141,6 +125966,12 @@ ] }, "description": "Explore graph analytics.\nExtract and summarize information about the documents and terms in an Elasticsearch data stream or index.\nThe easiest way to understand the behavior of this API is to use the Graph UI to explore connections.\nAn initial request to the `_explore` API contains a seed query that identifies the documents of interest and specifies the fields that define the vertices and connections you want to include in the graph.\nSubsequent requests enable you to spider out from one more vertices of interest.\nYou can exclude vertices that have already been 
returned.", + "examples": { + "GraphExploreRequestExample1": { + "description": "Run `POST clicklogs/_graph/explore` for a basic exploration An initial graph explore query typically begins with a query to identify strongly related terms. Seed the exploration with a query. This example is searching `clicklogs` for people who searched for the term `midi`.Identify the vertices to include in the graph. This example is looking for product codes that are significantly associated with searches for `midi`. Find the connections. This example is looking for other search terms that led people to click on the products that are associated with searches for `midi`.\n", + "value": "{\n \"query\": {\n \"match\": {\n \"query.raw\": \"midi\"\n }\n },\n \"vertices\": [\n {\n \"field\": \"product\"\n }\n ],\n \"connections\": {\n \"vertices\": [\n {\n \"field\": \"query.raw\"\n }\n ]\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -126842,6 +127673,12 @@ ] }, "description": "Migrate to data tiers routing.\nSwitch the indices, ILM policies, and legacy, composable, and component templates from using custom node attributes and attribute-based allocation filters to using data tiers.\nOptionally, delete one legacy index template.\nUsing node roles enables ILM to automatically move the indices between data tiers.\n\nMigrating away from custom node attributes routing can be manually performed.\nThis API provides an automated way of performing three out of the four manual steps listed in the migration guide:\n\n1. Stop setting the custom hot attribute on new indices.\n1. Remove custom allocation settings from existing ILM policies.\n1. Replace custom allocation settings from existing indices with the corresponding tier preference.\n\nILM must be stopped before performing the migration.\nUse the stop ILM and get ILM status APIs to wait until the reported operation mode is `STOPPED`.", + "examples": { + "RequestExample1": { + "description": "Run `POST /_ilm/migrate_to_data_tiers` to migrate the indices, ILM policies, legacy templates, composable, and component templates away from defining custom allocation filtering using the `custom_attribute_name` node attribute. It also deletes the legacy template with name `global-template` if it exists in the system.\n", + "value": "{\n \"legacy_template_to_delete\": \"global-template\",\n \"node_attribute\": \"custom_attribute_name\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -127013,6 +127850,18 @@ ] }, "description": "Move to a lifecycle step.\nManually move an index into a specific step in the lifecycle policy and run that step.\n\nWARNING: This operation can result in the loss of data. Manually moving an index into a specific step runs that step even if it has already been performed. 
This is a potentially destructive action and this should be considered an expert level API.\n\nYou must specify both the current step and the step to be executed in the body of the request.\nThe request will fail if the current step does not match the step currently running for the index\nThis is to prevent the index from being moved from an unexpected step into the next step.\n\nWhen specifying the target (`next_step`) to which the index will be moved, either the name or both the action and name fields are optional.\nIf only the phase is specified, the index will move to the first step of the first action in the target phase.\nIf the phase and action are specified, the index will move to the first step of the specified action in the specified phase.\nOnly actions specified in the ILM policy are considered valid.\nAn index cannot move to a step that is not part of its policy.", + "examples": { + "MoveToStepRequestExample1": { + "description": "Run `POST _ilm/move/my-index-000001` to move `my-index-000001` from the initial step to the `forcemerge` step.\n", + "summary": "Move to forcemerge step", + "value": "{\n \"current_step\": {\n \"phase\": \"new\",\n \"action\": \"complete\",\n \"name\": \"complete\"\n },\n \"next_step\": {\n \"phase\": \"warm\",\n \"action\": \"forcemerge\",\n \"name\": \"forcemerge\"\n }\n}" + }, + "MoveToStepRequestExample2": { + "description": "Run `POST _ilm/move/my-index-000001` to move `my-index-000001` from the end of hot phase into the start of warm.\n", + "summary": "Move to warm step", + "value": "{\n \"current_step\": {\n \"phase\": \"hot\",\n \"action\": \"complete\",\n \"name\": \"complete\"\n },\n \"next_step\": {\n \"phase\": \"warm\"\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -127125,6 +127974,12 @@ ] }, "description": "Create or update a lifecycle policy.\nIf the specified policy exists, it is replaced and the policy version is incremented.\n\nNOTE: Only the latest version of the policy is stored, you cannot revert to previous versions.", + "examples": { + "PutLifecycleRequestExample1": { + "description": "Run `PUT _ilm/policy/my_policy` to create a new policy with arbitrary metadata.\n", + "value": "{\n \"policy\": {\n \"_meta\": {\n \"description\": \"used for nginx log\",\n \"project\": {\n \"name\": \"myProject\",\n \"department\": \"myDepartment\"\n }\n },\n \"phases\": {\n \"warm\": {\n \"min_age\": \"10d\",\n \"actions\": {\n \"forcemerge\": {\n \"max_num_segments\": 1\n }\n }\n },\n \"delete\": {\n \"min_age\": \"30d\",\n \"actions\": {\n \"delete\": {}\n }\n }\n }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -132701,6 +133556,43 @@ ] }, "description": "Get tokens from text analysis.\nThe analyze API performs analysis on a text string and returns the resulting tokens.\n\nGenerating excessive amount of tokens may cause a node to run out of memory.\nThe `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced.\nIf more than this limit of tokens gets generated, an error occurs.\nThe `_analyze` endpoint without a specified index will always use `10000` as its limit.", + "examples": { + "indicesAnalyzeRequestExample1": { + "description": "You can apply any of the built-in analyzers to the text string without specifying an index.", + "summary": "No index specified", + "value": "{\n \"analyzer\": \"standard\",\n \"text\": \"this is a test\"\n}" + }, + "indicesAnalyzeRequestExample2": { + "description": "If the text parameter is provided as array of strings, it 
is analyzed as a multi-value field.", + "summary": "An array of text strings", + "value": "{\n \"analyzer\": \"standard\",\n \"text\": [\n \"this is a test\",\n \"the second text\"\n ]\n}" + }, + "indicesAnalyzeRequestExample3": { + "description": "You can test a custom transient analyzer built from tokenizers, token filters, and char filters. Token filters use the filter parameter.", + "summary": "Custom analyzer example 1", + "value": "{\n \"tokenizer\": \"keyword\",\n \"filter\": [\n \"lowercase\"\n ],\n \"char_filter\": [\n \"html_strip\"\n ],\n \"text\": \"this is a test\"\n}" + }, + "indicesAnalyzeRequestExample4": { + "description": "Custom tokenizers, token filters, and character filters can be specified in the request body.", + "summary": "Custom analyzer example 2", + "value": "{\n \"tokenizer\": \"whitespace\",\n \"filter\": [\n \"lowercase\",\n {\n \"type\": \"stop\",\n \"stopwords\": [\n \"a\",\n \"is\",\n \"this\"\n ]\n }\n ],\n \"text\": \"this is a test\"\n}" + }, + "indicesAnalyzeRequestExample5": { + "description": "Run `GET /analyze_sample/_analyze` to run an analysis on the text using the default index analyzer associated with the `analyze_sample` index. Alternatively, the analyzer can be derived based on a field mapping.", + "summary": "Derive analyzer from field mapping", + "value": "{\n \"field\": \"obj1.field1\",\n \"text\": \"this is a test\"\n}" + }, + "indicesAnalyzeRequestExample6": { + "description": "Run `GET /analyze_sample/_analyze` and supply a normalizer for a keyword field if there is a normalizer associated with the specified index.", + "summary": "Normalizer", + "value": "{\n \"normalizer\": \"my_normalizer\",\n \"text\": \"BaR\"\n}" + }, + "indicesAnalyzeRequestExample7": { + "description": "If you want to get more advanced details, set `explain` to `true`. It will output all token attributes for each token. You can filter token attributes you want to output by setting the `attributes` option. 
NOTE: The format of the additional detail information is labelled as experimental in Lucene and it may change in the future.\n", + "summary": "Explain analysis", + "value": "{\n \"tokenizer\": \"standard\",\n \"filter\": [\n \"snowball\"\n ],\n \"text\": \"detailed output\",\n \"explain\": true,\n \"attributes\": [\n \"keyword\"\n ]\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -132762,6 +133654,12 @@ } ] }, + "examples": { + "indicesAnalyzeResponseExample7": { + "description": "A successful response for an analysis with `explain` set to `true`.", + "value": "{\n \"detail\": {\n \"custom_analyzer\": true,\n \"charfilters\": [],\n \"tokenizer\": {\n \"name\": \"standard\",\n \"tokens\": [\n {\n \"token\": \"detailed\",\n \"start_offset\": 0,\n \"end_offset\": 8,\n \"type\": \"\",\n \"position\": 0\n },\n {\n \"token\": \"output\",\n \"start_offset\": 9,\n \"end_offset\": 15,\n \"type\": \"\",\n \"position\": 1\n }\n ]\n },\n \"tokenfilters\": [\n {\n \"name\": \"snowball\",\n \"tokens\": [\n {\n \"token\": \"detail\",\n \"start_offset\": 0,\n \"end_offset\": 8,\n \"type\": \"\",\n \"position\": 0,\n \"keyword\": false\n },\n {\n \"token\": \"output\",\n \"start_offset\": 9,\n \"end_offset\": 15,\n \"type\": \"\",\n \"position\": 1,\n \"keyword\": false\n }\n ]\n }\n ]\n }\n}" + } + }, "name": { "name": "Response", "namespace": "indices.analyze" @@ -133084,6 +133982,13 @@ ] }, "description": "Clone an index.\nClone an existing index into a new index.\nEach original primary shard is cloned into a new primary shard in the new index.\n\nIMPORTANT: Elasticsearch does not apply index templates to the resulting index.\nThe API also does not copy index metadata from the original index.\nIndex metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information.\nFor example, if you clone a CCR follower index, the resulting clone will not be a follower index.\n\nThe clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`.\nTo set the number of replicas in the resulting index, configure these settings in the clone request.\n\nCloning works as follows:\n\n* First, it creates a new target index with the same definition as the source index.\n* Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process.\n* Finally, it recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be cloned if they meet the following requirements:\n\n* The index must be marked as read-only and have a cluster health status of green.\n* The target index must not exist.\n* The source index must have the same number of primary shards as the target index.\n* The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index.\n\nThe current write index on a data stream cannot be cloned.\nIn order to clone the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be cloned.\n\nNOTE: Mappings cannot be specified in the `_clone` request. 
The mappings of the source index will be used for the target index.\n\n**Monitor the cloning process**\n\nThe cloning process can be monitored with the cat recovery API or the cluster health API can be used to wait until all primary shards have been allocated by setting the `wait_for_status` parameter to `yellow`.\n\nThe `_clone` API returns as soon as the target index has been added to the cluster state, before any shards have been allocated.\nAt this point, all shards are in the state unassigned.\nIf, for any reason, the target index can't be allocated, its primary shard will remain unassigned until it can be allocated on that node.\n\nOnce the primary shard is allocated, it moves to state initializing, and the clone process begins.\nWhen the clone operation completes, the shard will become active.\nAt that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node.\n\n**Wait for active shards**\n\nBecause the clone operation creates a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well.", + "examples": { + "indicesCloneRequestExample1": { + "description": "Clone `my_source_index` into a new index called `my_target_index` with `POST /my_source_index/_clone/my_target_index`. The API accepts `settings` and `aliases` parameters for the target index.\n", + "summary": "Clone an existing index.", + "value": "{\n \"settings\": {\n \"index.number_of_shards\": 5\n },\n \"aliases\": {\n \"my_search_indices\": {}\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -133510,6 +134415,23 @@ ] }, "description": "Create an index.\nYou can use the create index API to add a new index to an Elasticsearch cluster.\nWhen creating an index, you can specify the following:\n\n* Settings for the index.\n* Mappings for fields in the index.\n* Index aliases.\n\n**Wait for active shards**\n\nBy default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out.\nThe index creation response will indicate what happened.\nFor example, `acknowledged` indicates whether the index was successfully created in the cluster, while `shards_acknowledged` indicates whether the requisite number of shard copies were started for each shard in the index before timing out.\nNote that it is still possible for either `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation to be successful.\nThese values simply indicate whether the operation completed before the timeout.\nIf `acknowledged` is false, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon.\nIf `shards_acknowledged` is false, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect the newly created index (that is to say, `acknowledged` is `true`).\n\nYou can change the default of only waiting for the primary shards to start through the index setting `index.write.wait_for_active_shards`.\nNote that changing this setting will also affect the `wait_for_active_shards` value on all subsequent write operations.", + "examples": { + "indicesCreateRequestExample1": { + "description": "This request specifies the `number_of_shards` and `number_of_replicas`.", + "summary": "Create an index.", + "value": "{\n \"settings\": {\n 
\"number_of_shards\": 3,\n \"number_of_replicas\": 2\n }\n}" + }, + "indicesCreateRequestExample2": { + "description": "You can provide mapping definitions in the create index API requests.", + "summary": "Create an index with mappings.", + "value": "{\n \"settings\": {\n \"number_of_shards\": 1\n },\n \"mappings\": {\n \"properties\": {\n \"field1\": { \"type\": \"text\" }\n }\n }\n}" + }, + "indicesCreateRequestExample3": { + "description": "You can provide mapping definitions in the create index API requests. Index alias names also support date math.\n", + "summary": "Create an index with aliases.", + "value": "{\n \"aliases\": {\n \"alias_1\": {},\n \"alias_2\": {\n \"filter\": {\n \"term\": {\n \"user.id\": \"kimchy\"\n }\n },\n \"routing\": \"shard-1\"\n }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -134064,6 +134986,12 @@ } ] }, + "examples": { + "indicesDataStreamStatsResponseExample1": { + "description": "A successful response for retrieving statistics for a data stream.", + "value": "{\n \"_shards\": {\n \"total\": 10,\n \"successful\": 5,\n \"failed\": 0\n },\n \"data_stream_count\": 2,\n \"backing_indices\": 5,\n \"total_store_size\": \"7kb\",\n \"total_store_size_bytes\": 7268,\n \"data_streams\": [\n {\n \"data_stream\": \"my-data-stream\",\n \"backing_indices\": 3,\n \"store_size\": \"3.7kb\",\n \"store_size_bytes\": 3772,\n \"maximum_timestamp\": 1607512028000\n },\n {\n \"data_stream\": \"my-data-stream-two\",\n \"backing_indices\": 2,\n \"store_size\": \"3.4kb\",\n \"store_size_bytes\": 3496,\n \"maximum_timestamp\": 1607425567000\n }\n ]\n}" + } + }, "name": { "name": "Response", "namespace": "indices.data_streams_stats" @@ -134368,6 +135296,12 @@ } } }, + "examples": { + "IndicesDeleteDataLifecycleResponseExample1": { + "description": "A successful response for deleting a data stream lifecycle.", + "value": "{\n \"acknowledged\": true\n}" + } + }, "name": { "name": "Response", "namespace": "indices.delete_data_lifecycle" @@ -134750,6 +135684,11 @@ } }, "description": "Downsample an index.\nAggregate a time series (TSDS) index and store pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval.\nFor example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index.\nAll documents within an hour interval are summarized and stored as a single document in the downsample index.\n\nNOTE: Only indices in a time series data stream are supported.\nNeither field nor document level security can be defined on the source index.\nThe source index must be read only (`index.blocks.write: true`).", + "examples": { + "RequestExample1": { + "value": "{\n \"fixed_interval\": \"1d\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -135760,6 +136699,12 @@ } } }, + "examples": { + "indicesFieldUsageStatsResponseExample1": { + "description": "An abbreviated response from `GET /my-index-000001/_field_usage_stats`. 
The `all_fields` object reports the sums of the usage counts for all fields in the index (on the listed shard).\n", + "value": "{\n \"_shards\": {\n \"total\": 1,\n \"successful\": 1,\n \"failed\": 0\n },\n \"my-index-000001\": {\n \"shards\": [\n {\n \"tracking_id\": \"MpOl0QlTQ4SYYhEe6KgJoQ\",\n \"tracking_started_at_millis\": 1625558985010,\n \"routing\": {\n \"state\": \"STARTED\",\n \"primary\": true,\n \"node\": \"gA6KeeVzQkGURFCUyV-e8Q\",\n \"relocating_node\": null\n },\n \"stats\": {\n \"all_fields\": {\n \"any\": 6,\n \"inverted_index\": {\n \"terms\": 1,\n \"postings\": 1,\n \"proximity\": 1,\n \"positions\": 0,\n \"term_frequencies\": 1,\n \"offsets\": 0,\n \"payloads\": 0\n },\n \"stored_fields\": 2,\n \"doc_values\": 1,\n \"points\": 0,\n \"norms\": 1,\n \"term_vectors\": 0,\n \"knn_vectors\": 0\n },\n \"fields\": {\n \"_id\": {\n \"any\": 1,\n \"inverted_index\": {\n \"terms\": 1,\n \"postings\": 1,\n \"proximity\": 1,\n \"positions\": 0,\n \"term_frequencies\": 1,\n \"offsets\": 0,\n \"payloads\": 0\n },\n \"stored_fields\": 1,\n \"doc_values\": 0,\n \"points\": 0,\n \"norms\": 0,\n \"term_vectors\": 0,\n \"knn_vectors\": 0\n },\n \"_source\": {},\n \"context\": {},\n \"message.keyword\": {}\n }\n }\n }\n ]\n }\n}" + } + }, "name": { "name": "Response", "namespace": "indices.field_usage_stats" @@ -137004,6 +137949,12 @@ } ] }, + "examples": { + "indicesGetDataStreamResponseExample1": { + "description": "A successful response for retrieving information about a data stream.", + "value": "{\n \"data_streams\": [\n {\n \"name\": \"my-data-stream\",\n \"timestamp_field\": {\n \"name\": \"@timestamp\"\n },\n \"indices\": [\n {\n \"index_name\": \".ds-my-data-stream-2099.03.07-000001\",\n \"index_uuid\": \"xCEhwsp8Tey0-FLNFYVwSg\",\n \"prefer_ilm\": true,\n \"ilm_policy\": \"my-lifecycle-policy\",\n \"managed_by\": \"Index Lifecycle Management\"\n },\n {\n \"index_name\": \".ds-my-data-stream-2099.03.08-000002\",\n \"index_uuid\": \"PA_JquKGSiKcAKBA8DJ5gw\",\n \"prefer_ilm\": true,\n \"ilm_policy\": \"my-lifecycle-policy\",\n \"managed_by\": \"Index Lifecycle Management\"\n }\n ],\n \"generation\": 2,\n \"_meta\": {\n \"my-meta-field\": \"foo\"\n },\n \"status\": \"GREEN\",\n \"next_generation_managed_by\": \"Index Lifecycle Management\",\n \"prefer_ilm\": true,\n \"template\": \"my-index-template\",\n \"ilm_policy\": \"my-lifecycle-policy\",\n \"hidden\": false,\n \"system\": false,\n \"allow_custom_routing\": false,\n \"replicated\": false,\n \"rollover_on_write\": false\n },\n {\n \"name\": \"my-data-stream-two\",\n \"timestamp_field\": {\n \"name\": \"@timestamp\"\n },\n \"indices\": [\n {\n \"index_name\": \".ds-my-data-stream-two-2099.03.08-000001\",\n \"index_uuid\": \"3liBu2SYS5axasRt6fUIpA\",\n \"prefer_ilm\": true,\n \"ilm_policy\": \"my-lifecycle-policy\",\n \"managed_by\": \"Index Lifecycle Management\"\n }\n ],\n \"generation\": 1,\n \"_meta\": {\n \"my-meta-field\": \"foo\"\n },\n \"status\": \"YELLOW\",\n \"next_generation_managed_by\": \"Index Lifecycle Management\",\n \"prefer_ilm\": true,\n \"template\": \"my-index-template\",\n \"ilm_policy\": \"my-lifecycle-policy\",\n \"hidden\": false,\n \"system\": false,\n \"allow_custom_routing\": false,\n \"replicated\": false,\n \"rollover_on_write\": false\n }\n ]\n}" + } + }, "name": { "name": "Response", "namespace": "indices.get_data_stream" @@ -138501,6 +139452,12 @@ } ] }, + "examples": { + "indicesOpenResponseExample1": { + "description": "A successful response for opening an index.", + "value": "{\n 
\"acknowledged\" : true,\n \"shards_acknowledged\" : true\n}" + } + }, "name": { "name": "Response", "namespace": "indices.open" @@ -138642,6 +139599,11 @@ ] }, "description": "Create or update an alias.\nAdds a data stream or index to an alias.", + "examples": { + "indicesPutAliasRequestExample1": { + "value": "{\n \"actions\": [\n {\n \"add\": {\n \"index\": \"my-data-stream\",\n \"alias\": \"my-alias\"\n }\n }\n ]\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -138745,6 +139707,17 @@ } }, "description": "Update data stream lifecycles.\nUpdate the data stream lifecycle of the specified data streams.", + "examples": { + "IndicesPutDataLifecycleRequestExample1": { + "summary": "Set the data stream lifecycle retention", + "value": "{\n \"data_retention\": \"7d\"\n}" + }, + "IndicesPutDataLifecycleRequestExample2": { + "description": "This example configures two downsampling rounds.", + "summary": "Set the data stream lifecycle downsampling", + "value": "{\n \"downsampling\": [\n {\n \"after\": \"1d\",\n \"fixed_interval\": \"10m\"\n },\n {\n \"after\": \"7d\",\n \"fixed_interval\": \"1d\"\n }\n ]\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -138824,6 +139797,12 @@ } } }, + "examples": { + "IndicesPutDataLifecycleResponseExample1": { + "description": "A successful response for configuring a data stream lifecycle.", + "value": "{\n \"acknowledged\": true\n}" + } + }, "name": { "name": "Response", "namespace": "indices.put_data_lifecycle" @@ -139046,6 +140025,17 @@ ] }, "description": "Create or update an index template.\nIndex templates define settings, mappings, and aliases that can be applied automatically to new indices.\n\nElasticsearch applies templates to new indices based on an wildcard pattern that matches the index name.\nIndex templates are applied during data stream or index creation.\nFor data streams, these settings and mappings are applied when the stream's backing indices are created.\nSettings and mappings specified in a create index API request override any settings or mappings specified in an index template.\nChanges to index templates do not affect existing indices, including the existing backing indices of a data stream.\n\nYou can use C-style `/* *\\/` block comments in index templates.\nYou can include comments anywhere in the request body, except before the opening curly bracket.\n\n**Multiple matching templates**\n\nIf multiple index templates match the name of a new index or data stream, the template with the highest priority is used.\n\nMultiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities.\n\n**Composing aliases, mappings, and settings**\n\nWhen multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates.\nAny mappings, settings, or aliases from the parent index template are merged in next.\nFinally, any configuration on the index request itself is merged.\nMapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration.\nIf a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one.\nThis recursive merging strategy applies not only to field mappings, but also root options 
like `dynamic_templates` and `meta`.\nIf an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end.\nIf an entry already exists with the same key, then it is overwritten by the new definition.", + "examples": { + "IndicesPutIndexTemplateRequestExample1": { + "summary": "Create a template", + "value": "{\n \"index_patterns\" : [\"template*\"],\n \"priority\" : 1,\n \"template\": {\n \"settings\" : {\n \"number_of_shards\" : 2\n }\n }\n}" + }, + "IndicesPutIndexTemplateRequestExample2": { + "description": "You can include index aliases in an index template.\nDuring index creation, the `{index}` placeholder in the alias name will be replaced with the actual index name that the template gets applied to.\n", + "summary": "Create a template with aliases", + "value": "{\n \"index_patterns\": [\n \"template*\"\n ],\n \"template\": {\n \"settings\": {\n \"number_of_shards\": 1\n },\n \"aliases\": {\n \"alias1\": {},\n \"alias2\": {\n \"filter\": {\n \"term\": {\n \"user.id\": \"kimchy\"\n }\n },\n \"routing\": \"shard-1\"\n },\n \"{index}-alias\": {}\n }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -139325,6 +140315,13 @@ ] }, "description": "Update field mappings.\nAdd new fields to an existing data stream or index.\nYou can also use this API to change the search settings of existing fields and add new properties to existing object fields.\nFor data streams, these changes are applied to all backing indices by default.\n\n**Add multi-fields to an existing field**\n\nMulti-fields let you index the same field in different ways.\nYou can use this API to update the fields mapping parameter and enable multi-fields for an existing field.\nWARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field.\nYou can populate the new multi-field with the update by query API.\n\n**Change supported mapping parameters for an existing field**\n\nThe documentation for each mapping parameter indicates whether you can update it for an existing field using this API.\nFor example, you can use the update mapping API to update the `ignore_above` parameter.\n\n**Change the mapping of an existing field**\n\nExcept for supported mapping parameters, you can't change the mapping or field type of an existing field.\nChanging an existing field could invalidate data that's already indexed.\n\nIf you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams.\nIf you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index.\n\n**Rename a field**\n\nRenaming a field would invalidate data already indexed under the old field name.\nInstead, add an alias field to create an alternate field name.", + "examples": { + "indicesPutMappingRequestExample1": { + "description": "The update mapping API can be applied to multiple data streams or indices with a single request. 
For example, run `PUT /my-index-000001,my-index-000002/_mapping` to update mappings for the `my-index-000001` and `my-index-000002` indices at the same time.\n", + "summary": "Update multiple targets", + "value": "{\n \"properties\": {\n \"user\": {\n \"properties\": {\n \"name\": {\n \"type\": \"keyword\"\n }\n }\n }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -139466,6 +140463,22 @@ } }, "description": "Update index settings.\nChanges dynamic index settings in real time.\nFor data streams, index setting changes are applied to all backing indices by default.\n\nTo revert a setting to the default value, use a null value.\nThe list of per-index settings that can be updated dynamically on live indices can be found in index module documentation.\nTo preserve existing settings from being updated, set the `preserve_existing` parameter to `true`.\n\nNOTE: You can only define new analyzers on closed indices.\nTo add an analyzer, you must close the index, define the analyzer, and reopen the index.\nYou cannot close the write index of a data stream.\nTo update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream.\nThen roll over the data stream to apply the new analyzer to the stream's write index and future backing indices.\nThis affects searches and any new data added to the stream after the rollover.\nHowever, it does not affect the data stream's backing indices or their existing data.\nTo change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.", + "examples": { + "indicesPutSettingsRequestExample1": { + "summary": "Change a dynamic index setting", + "value": "{\n \"index\" : {\n \"number_of_replicas\" : 2\n }\n}" + }, + "indicesPutSettingsRequestExample2": { + "description": "To revert a setting to the default value, use `null`.", + "summary": "Reset an index setting", + "value": "{\n \"index\" : {\n \"refresh_interval\" : null\n }\n}" + }, + "indicesPutSettingsRequestExample3": { + "description": "To add an analyzer, you must close the index, define the analyzer, then reopen the index.", + "summary": "Update index analysis", + "value": "{\n \"analysis\" : {\n \"analyzer\":{\n \"content\":{\n \"type\":\"custom\",\n \"tokenizer\":\"whitespace\"\n }\n }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -139712,6 +140725,17 @@ ] }, "description": "Create or update an index template.\nIndex templates define settings, mappings, and aliases that can be applied automatically to new indices.\nElasticsearch applies templates to new indices based on an index pattern that matches the index name.\n\nIMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.\n\nComposable templates always take precedence over legacy templates.\nIf no composable template matches a new index, matching legacy templates are applied according to their order.\n\nIndex templates are only applied during index creation.\nChanges to index templates do not affect existing indices.\nSettings and mappings specified in create index API requests override any settings or mappings specified in an index template.\n\nYou can use C-style `/* *\\/` block comments in index templates.\nYou can include comments anywhere in the request body, except before the opening curly bracket.\n\n**Indices matching multiple templates**\n\nMultiple 
index templates can potentially match an index; in this case, both the settings and mappings are merged into the final configuration of the index.\nThe order of the merging can be controlled using the order parameter, with lower order being applied first, and higher orders overriding them.\nNOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order.", + "examples": { + "indicesPutTemplateRequestExample1": { + "summary": "Create an index template", + "value": "{\n \"index_patterns\": [\n \"te*\",\n \"bar*\"\n ],\n \"settings\": {\n \"number_of_shards\": 1\n },\n \"mappings\": {\n \"_source\": {\n \"enabled\": false\n },\n \"properties\": {\n \"host_name\": {\n \"type\": \"keyword\"\n },\n \"created_at\": {\n \"type\": \"date\",\n \"format\": \"EEE MMM dd HH:mm:ss Z yyyy\"\n }\n }\n }\n}" + }, + "indicesPutTemplateRequestExample2": { + "description": "You can include index aliases in an index template. During index creation, the `{index}` placeholder in the alias name will be replaced with the actual index name that the template gets applied to.\n", + "summary": "Create an index template with aliases", + "value": "{\n \"index_patterns\": [\n \"te*\"\n ],\n \"settings\": {\n \"number_of_shards\": 1\n },\n \"aliases\": {\n \"alias1\": {},\n \"alias2\": {\n \"filter\": {\n \"term\": {\n \"user.id\": \"kimchy\"\n }\n },\n \"routing\": \"shard-1\"\n },\n \"{index}-alias\": {}\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -140487,6 +141511,18 @@ } } }, + "examples": { + "indicesRecoveryResponseExample1": { + "description": "A successful response from `GET /_recovery?human`, which gets information about ongoing and completed shard recoveries for all data streams and indices in a cluster. This example includes information about a single index recovering a single shard. The source of the recovery is a snapshot repository and the target of the recovery is the `my_es_node` node. 
The response also includes the number and percentage of files and bytes recovered.\n", + "summary": "Get recovery information", + "value": "{\n \"index1\" : {\n \"shards\" : [ {\n \"id\" : 0,\n \"type\" : \"SNAPSHOT\",\n \"stage\" : \"INDEX\",\n \"primary\" : true,\n \"start_time\" : \"2014-02-24T12:15:59.716\",\n \"start_time_in_millis\": 1393244159716,\n \"stop_time\" : \"0s\",\n \"stop_time_in_millis\" : 0,\n \"total_time\" : \"2.9m\",\n \"total_time_in_millis\" : 175576,\n \"source\" : {\n \"repository\" : \"my_repository\",\n \"snapshot\" : \"my_snapshot\",\n \"index\" : \"index1\",\n \"version\" : \"{version}\",\n \"restoreUUID\": \"PDh1ZAOaRbiGIVtCvZOMww\"\n },\n \"target\" : {\n \"id\" : \"ryqJ5lO5S4-lSFbGntkEkg\",\n \"host\" : \"my.fqdn\",\n \"transport_address\" : \"my.fqdn\",\n \"ip\" : \"10.0.1.7\",\n \"name\" : \"my_es_node\"\n },\n \"index\" : {\n \"size\" : {\n \"total\" : \"75.4mb\",\n \"total_in_bytes\" : 79063092,\n \"reused\" : \"0b\",\n \"reused_in_bytes\" : 0,\n \"recovered\" : \"65.7mb\",\n \"recovered_in_bytes\" : 68891939,\n \"recovered_from_snapshot\" : \"0b\",\n \"recovered_from_snapshot_in_bytes\" : 0,\n \"percent\" : \"87.1%\"\n },\n \"files\" : {\n \"total\" : 73,\n \"reused\" : 0,\n \"recovered\" : 69,\n \"percent\" : \"94.5%\"\n },\n \"total_time\" : \"0s\",\n \"total_time_in_millis\" : 0,\n \"source_throttle_time\" : \"0s\",\n \"source_throttle_time_in_millis\" : 0,\n \"target_throttle_time\" : \"0s\",\n \"target_throttle_time_in_millis\" : 0\n },\n \"translog\" : {\n \"recovered\" : 0,\n \"total\" : 0,\n \"percent\" : \"100.0%\",\n \"total_on_start\" : 0,\n \"total_time\" : \"0s\",\n \"total_time_in_millis\" : 0\n },\n \"verify_index\" : {\n \"check_index_time\" : \"0s\",\n \"check_index_time_in_millis\" : 0,\n \"total_time\" : \"0s\",\n \"total_time_in_millis\" : 0\n }\n } ]\n }\n}" + }, + "indicesRecoveryResponseExample2": { + "description": "A successful response from `GET _recovery?human&detailed=true`. The response includes a listing of any physical files recovered and their sizes. The response also includes timings in milliseconds of the various stages of recovery: index retrieval, translog replay, and index start time. 
This response indicates the recovery is done.\n", + "summary": "Get detailed recovery information", + "value": "{\n \"index1\" : {\n \"shards\" : [ {\n \"id\" : 0,\n \"type\" : \"EXISTING_STORE\",\n \"stage\" : \"DONE\",\n \"primary\" : true,\n \"start_time\" : \"2014-02-24T12:38:06.349\",\n \"start_time_in_millis\" : 1393245486349,\n \"stop_time\" : \"2014-02-24T12:38:08.464\",\n \"stop_time_in_millis\" : 1393245488464,\n \"total_time\" : \"2.1s\",\n \"total_time_in_millis\" : 2115,\n \"source\" : {\n \"id\" : \"RGMdRc-yQWWKIBM4DGvwqQ\",\n \"host\" : \"my.fqdn\",\n \"transport_address\" : \"my.fqdn\",\n \"ip\" : \"10.0.1.7\",\n \"name\" : \"my_es_node\"\n },\n \"target\" : {\n \"id\" : \"RGMdRc-yQWWKIBM4DGvwqQ\",\n \"host\" : \"my.fqdn\",\n \"transport_address\" : \"my.fqdn\",\n \"ip\" : \"10.0.1.7\",\n \"name\" : \"my_es_node\"\n },\n \"index\" : {\n \"size\" : {\n \"total\" : \"24.7mb\",\n \"total_in_bytes\" : 26001617,\n \"reused\" : \"24.7mb\",\n \"reused_in_bytes\" : 26001617,\n \"recovered\" : \"0b\",\n \"recovered_in_bytes\" : 0,\n \"recovered_from_snapshot\" : \"0b\",\n \"recovered_from_snapshot_in_bytes\" : 0,\n \"percent\" : \"100.0%\"\n },\n \"files\" : {\n \"total\" : 26,\n \"reused\" : 26,\n \"recovered\" : 0,\n \"percent\" : \"100.0%\",\n \"details\" : [ {\n \"name\" : \"segments.gen\",\n \"length\" : 20,\n \"recovered\" : 20\n }, {\n \"name\" : \"_0.cfs\",\n \"length\" : 135306,\n \"recovered\" : 135306,\n \"recovered_from_snapshot\": 0\n }, {\n \"name\" : \"segments_2\",\n \"length\" : 251,\n \"recovered\" : 251,\n \"recovered_from_snapshot\": 0\n }\n ]\n },\n \"total_time\" : \"2ms\",\n \"total_time_in_millis\" : 2,\n \"source_throttle_time\" : \"0s\",\n \"source_throttle_time_in_millis\" : 0,\n \"target_throttle_time\" : \"0s\",\n \"target_throttle_time_in_millis\" : 0\n },\n \"translog\" : {\n \"recovered\" : 71,\n \"total\" : 0,\n \"percent\" : \"100.0%\",\n \"total_on_start\" : 0,\n \"total_time\" : \"2.0s\",\n \"total_time_in_millis\" : 2025\n },\n \"verify_index\" : {\n \"check_index_time\" : 0,\n \"check_index_time_in_millis\" : 0,\n \"total_time\" : \"88ms\",\n \"total_time_in_millis\" : 88\n }\n } ]\n }\n}" + } + }, "name": { "name": "Response", "namespace": "indices.recovery" @@ -141050,6 +142086,11 @@ "kind": "no_body" }, "description": "Reload search analyzers.\nReload an index's search analyzers and their resources.\nFor data streams, the API reloads search analyzers and resources for the stream's backing indices.\n\nIMPORTANT: After reloading the search analyzers you should clear the request cache to make sure it doesn't contain responses derived from the previous versions of the analyzer.\n\nYou can use the reload search analyzers API to pick up changes to synonym files used in the `synonym_graph` or `synonym` token filter of a search analyzer.\nTo be eligible, the token filter must have an `updateable` flag of `true` and only be used in search analyzers.\n\nNOTE: This API does not perform a reload for each shard of an index.\nInstead, it performs a reload for each node containing index shards.\nAs a result, the total shard count returned by the API can differ from the number of index shards.\nBecause reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster--including nodes that don't contain a shard replica--before using this API.\nThis ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future.", + "examples": { + 
"ReloadSearchAnalyzersRequestExample1": { + "value": "{\n \"_shards\": {\n \"total\": 2,\n \"successful\": 2,\n \"failed\": 0\n },\n \"reload_details\": [\n {\n \"index\": \"my-index-000001\",\n \"reloaded_analyzers\": [\n \"my_synonyms\"\n ],\n \"reloaded_node_ids\": [\n \"mfdqTXn_T7SGr2Ho2KT8uw\"\n ]\n }\n ]\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -141331,6 +142372,18 @@ } } }, + "examples": { + "ResolveClusterResponseExample1": { + "description": "A successful response from `GET /_resolve/cluster/my-index*,clust*:my-index*`. Each cluster has its own response section. The cluster you sent the request to is labelled as \"(local)\".\n", + "summary": "Resolve with wildcards", + "value": "{\n \"(local)\": {\n \"connected\": true,\n \"skip_unavailable\": false,\n \"matching_indices\": true,\n \"version\": {\n \"number\": \"8.13.0\",\n \"build_flavor\": \"default\",\n \"minimum_wire_compatibility_version\": \"7.17.0\",\n \"minimum_index_compatibility_version\": \"7.0.0\"\n }\n },\n \"cluster_one\": {\n \"connected\": true,\n \"skip_unavailable\": true,\n \"matching_indices\": true,\n \"version\": {\n \"number\": \"8.13.0\",\n \"build_flavor\": \"default\",\n \"minimum_wire_compatibility_version\": \"7.17.0\",\n \"minimum_index_compatibility_version\": \"7.0.0\"\n }\n },\n \"cluster_two\": {\n \"connected\": true,\n \"skip_unavailable\": false,\n \"matching_indices\": true,\n \"version\": {\n \"number\": \"8.13.0\",\n \"build_flavor\": \"default\",\n \"minimum_wire_compatibility_version\": \"7.17.0\",\n \"minimum_index_compatibility_version\": \"7.0.0\"\n }\n }\n}" + }, + "ResolveClusterResponseExample2": { + "description": "A successful response from `GET /_resolve/cluster/not-present,clust*:my-index*,oldcluster:*?ignore_unavailable=false&timeout=5s`. This type of request can be used to identify potential problems with your cross-cluster search. Note also that a `timeout` of 5 seconds is sent, which sets the maximum time the query will wait for remote clusters to respond. The local cluster has no index called `not_present`. Searching with `ignore_unavailable=false` would return a \"no such index\" error. The `cluster_one` remote cluster has no indices that match the pattern `my-index*`. There may be no indices that match the pattern or the index could be closed. The `cluster_two` remote cluster is not connected (the attempt to connect failed). Since this cluster is marked as `skip_unavailable=false`, you should probably exclude this cluster from the search by adding `-cluster_two:*` to the search index expression. For `cluster_three`, the error message indicates that this remote cluster did not respond within the 5-second timeout window specified, so it is also marked as not connected. The `oldcluster` remote cluster shows that it has matching indices, but no version information is included. 
This indicates that the cluster version predates the introduction of the `_resolve/cluster` API, so you may want to exclude it from your cross-cluster search.\n", + "summary": "Identify search problems", + "value": "{\n \"(local)\": {\n \"connected\": true,\n \"skip_unavailable\": false,\n \"error\": \"no such index [not_present]\"\n },\n \"cluster_one\": {\n \"connected\": true,\n \"skip_unavailable\": true,\n \"matching_indices\": false,\n \"version\": {\n \"number\": \"8.13.0\",\n \"build_flavor\": \"default\",\n \"minimum_wire_compatibility_version\": \"7.17.0\",\n \"minimum_index_compatibility_version\": \"7.0.0\"\n }\n },\n \"cluster_two\": {\n \"connected\": false,\n \"skip_unavailable\": false\n },\n \"cluster_three\": {\n \"connected\": false,\n \"skip_unavailable\": false,\n \"error\": \"Request timed out before receiving a response from the remote cluster\"\n },\n \"oldcluster\": {\n \"connected\": true,\n \"skip_unavailable\": false,\n \"matching_indices\": true\n }\n}" + } + }, "name": { "name": "Response", "namespace": "indices.resolve_cluster" @@ -141680,6 +142733,12 @@ ] }, "description": "Roll over to a new index.\nTIP: It is recommended to use the index lifecycle rollover action to automate rollovers.\n\nThe rollover API creates a new index for a data stream or index alias.\nThe API behavior depends on the rollover target.\n\n**Roll over a data stream**\n\nIf you roll over a data stream, the API creates a new write index for the stream.\nThe stream's previous write index becomes a regular backing index.\nA rollover also increments the data stream's generation.\n\n**Roll over an index alias with a write index**\n\nTIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data.\nData streams replace this functionality, require less maintenance, and automatically integrate with data tiers.\n\nIf an index alias points to multiple indices, one of the indices must be a write index.\nThe rollover API creates a new write index for the alias with `is_write_index` set to `true`.\nThe API also sets `is_write_index` to `false` for the previous write index.\n\n**Roll over an index alias with one index**\n\nIf you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias.\n\nNOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting.\n\n**Increment index names for an alias**\n\nWhen you roll over an index alias, you can specify a name for the new index.\nIf you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number.\nFor example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`.\nThis number is always six characters and zero-padded, regardless of the previous index's name.\n\nIf you use an index alias for time series data, you can use date math in the index name to track the rollover date.\nFor example, you can create an alias that points to an index named `<my-index-{now/d}-000001>`.\nIf you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`.\nIf you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`.", + "examples": { + "indicesRolloverRequestExample1": { + "summary": "Create a new index for a data stream.", + "value": "{\n \"conditions\": {\n \"max_age\": \"7d\",\n \"max_docs\": 1000,\n 
\"max_primary_shard_size\": \"50gb\",\n \"max_primary_shard_docs\": \"2000\"\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -141867,6 +142926,12 @@ } ] }, + "examples": { + "indicesRolloverResponseExample1": { + "description": "An abbreviated response from `GET /_segments`.\n", + "value": "{\n \"_shards\": {},\n \"indices\": {\n \"test\": {\n \"shards\": {\n \"0\": [\n {\n \"routing\": {\n \"state\": \"STARTED\",\n \"primary\": true,\n \"node\": \"zDC_RorJQCao9xf9pg3Fvw\"\n },\n \"num_committed_segments\": 0,\n \"num_search_segments\": 1,\n \"segments\": {\n \"_0\": {\n \"generation\": 0,\n \"num_docs\": 1,\n \"deleted_docs\": 0,\n \"size_in_bytes\": 3800,\n \"committed\": false,\n \"search\": true,\n \"version\": \"7.0.0\",\n \"compound\": true,\n \"attributes\": {}\n }\n }\n }\n ]\n }\n }\n }\n}" + } + }, "name": { "name": "Response", "namespace": "indices.rollover" @@ -142220,6 +143285,12 @@ } ] }, + "examples": { + "indicesSegmentsResponseExample1": { + "description": "A successful response for creating a new index for a data stream.", + "value": "{\n \"acknowledged\": true,\n \"shards_acknowledged\": true,\n \"old_index\": \".ds-my-data-stream-2099.05.06-000001\",\n \"new_index\": \".ds-my-data-stream-2099.05.07-000002\",\n \"rolled_over\": true,\n \"dry_run\": false,\n \"lazy\": false,\n \"conditions\": {\n \"[max_age: 7d]\": false,\n \"[max_docs: 1000]\": true,\n \"[max_primary_shard_size: 50gb]\": false,\n \"[max_primary_shard_docs: 2000]\": false\n }\n}" + } + }, "name": { "name": "Response", "namespace": "indices.segments" @@ -142617,6 +143688,12 @@ } ] }, + "examples": { + "indicesShardStoresResponseExample1": { + "description": "An abbreviated response from `GET /_shard_stores?status=green`.\n", + "value": "{\n \"indices\": {\n \"my-index-000001\": {\n \"shards\": {\n \"0\": {\n \"stores\": [\n {\n \"sPa3OgxLSYGvQ4oPs-Tajw\": {\n \"name\": \"node_t0\",\n \"ephemeral_id\": \"9NlXRFGCT1m8tkvYCMK-8A\",\n \"transport_address\": \"local[1]\",\n \"external_id\": \"node_t0\",\n \"attributes\": {},\n \"roles\": [],\n \"version\": \"8.10.0\",\n \"min_index_version\": 7000099,\n \"max_index_version\": 8100099\n },\n \"allocation_id\": \"2iNySv_OQVePRX-yaRH_lQ\",\n \"allocation\": \"primary\",\n \"store_exception\": {}\n }\n ]\n }\n }\n }\n }\n}" + } + }, "name": { "name": "Response", "namespace": "indices.shard_stores" @@ -142941,6 +144018,12 @@ ] }, "description": "Shrink an index.\nShrink an index into a new index with fewer primary shards.\n\nBefore you can shrink an index:\n\n* The index must be read-only.\n* A copy of every shard in the index must reside on the same node.\n* The index must have a green health status.\n\nTo make shard allocation easier, we recommend you also remove the index's replica shards.\nYou can later re-add replica shards as part of the shrink operation.\n\nThe requested number of primary shards in the target index must be a factor of the number of shards in the source index.\nFor example an index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an index with 15 primary shards can be shrunk into 5, 3 or 1.\nIf the number of shards in the index is a prime number it can only be shrunk into a single primary shard\n Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node.\n\nThe current write index on a data stream cannot be shrunk. 
In order to shrink the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be shrunk.\n\nA shrink operation:\n\n* Creates a new target index with the same definition as the source index, but with a smaller number of primary shards.\n* Hard-links segments from the source index into the target index. If the file system does not support hard-linking, then all segments are copied into the new index, which is a much more time consuming process. Also, if using multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk since hardlinks do not work across disks.\n* Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the `.routing.allocation.initial_recovery._id` index setting.\n\nIMPORTANT: Indices can only be shrunk if they satisfy the following requirements:\n\n* The target index must not exist.\n* The source index must have more primary shards than the target index.\n* The number of primary shards in the target index must be a factor of the number of primary shards in the source index.\n* The index must not contain more than 2,147,483,519 documents in total across all shards that will be shrunk into a single shard on the target index as this is the maximum number of docs that can fit into a single shard.\n* The node handling the shrink process must have sufficient free disk space to accommodate a second copy of the existing index.", + "examples": { + "indicesShrinkRequestExample1": { + "summary": "Shrink an existing index into a new index with fewer primary shards.", + "value": "{\n \"settings\": {\n \"index.routing.allocation.require._name\": null,\n \"index.blocks.write\": null\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -143170,6 +144253,12 @@ } ] }, + "examples": { + "indicesSimulateIndexTemplateResponseExample1": { + "description": "A successful response from `POST /_index_template/_simulate_index/my-index-000001`.", + "value": "{\n \"template\" : {\n \"settings\" : {\n \"index\" : {\n \"number_of_shards\" : \"2\",\n \"number_of_replicas\" : \"0\",\n \"routing\" : {\n \"allocation\" : {\n \"include\" : {\n \"_tier_preference\" : \"data_content\"\n }\n }\n }\n }\n },\n \"mappings\" : {\n \"properties\" : {\n \"@timestamp\" : {\n \"type\" : \"date\"\n }\n }\n },\n \"aliases\" : { }\n },\n \"overlapping\" : [\n {\n \"name\" : \"template_1\",\n \"index_patterns\" : [\n \"my-index-*\"\n ]\n }\n ]\n}" + } + }, "name": { "name": "Response", "namespace": "indices.simulate_index_template" @@ -143352,6 +144441,12 @@ ] }, "description": "Simulate an index template.\nGet the index configuration that would be applied by a particular index template.", + "examples": { + "indicesSimulateTemplateRequestExample1": { + "description": "To see what settings will be applied by a template before you add it to the cluster, you can pass a template configuration in the request body. 
The specified template is used for the simulation if it has a higher priority than existing templates.\n", + "value": "{\n \"index_patterns\": [\"my-index-*\"],\n \"composed_of\": [\"ct2\"],\n \"priority\": 10,\n \"template\": {\n \"settings\": {\n \"index.number_of_replicas\": 1\n }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -143460,6 +144555,12 @@ } ] }, + "examples": { + "indicesSimulateTemplateResponseExample2": { + "description": "A successful response from `POST /_index_template/_simulate` with a template configuration in the request body. The response shows any overlapping templates with a lower priority.\n", + "value": "{\n \"template\" : {\n \"settings\" : {\n \"index\" : {\n \"number_of_replicas\" : \"1\",\n \"routing\" : {\n \"allocation\" : {\n \"include\" : {\n \"_tier_preference\" : \"data_content\"\n }\n }\n }\n }\n },\n \"mappings\" : {\n \"properties\" : {\n \"@timestamp\" : {\n \"type\" : \"date\"\n }\n }\n },\n \"aliases\" : { }\n },\n \"overlapping\" : [\n {\n \"name\" : \"final-template\",\n \"index_patterns\" : [\n \"my-index-*\"\n ]\n }\n ]\n}" + } + }, "name": { "name": "Response", "namespace": "indices.simulate_template" @@ -143573,6 +144674,12 @@ ] }, "description": "Split an index.\nSplit an index into a new index with more primary shards.\n\nBefore you can split an index:\n\n* The index must be read-only.\n* The cluster health status must be green.\n\nYou can make an index read-only with the following request using the add index block API:\n\n```\nPUT /my_source_index/_block/write\n```\n\nThe current write index on a data stream cannot be split.\nIn order to split the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be split.\n\nThe number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting.\nThe number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing.\nFor instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3.\n\nA split operation:\n\n* Creates a new target index with the same definition as the source index, but with a larger number of primary shards.\n* Hard-links segments from the source index into the target index. 
If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process.\n* Hashes all documents again, after low level files are created, to delete documents that belong to a different shard.\n* Recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be split if they satisfy the following requirements:\n\n* The target index must not exist.\n* The source index must have fewer primary shards than the target index.\n* The number of primary shards in the target index must be a multiple of the number of primary shards in the source index.\n* The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.", + "examples": { + "indicesSplitRequestExample1": { + "description": "Split an existing index into a new index with more primary shards.", + "value": "{\n \"settings\": {\n \"index.number_of_shards\": 2\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -146596,6 +147703,28 @@ ] }, "description": "Perform inference on the service.\n\nThis API enables you to use machine learning models to perform specific tasks on data that you provide as an input.\nIt returns a response with the results of the tasks.\nThe inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.\n\n> info\n> The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. 
However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.", + "examples": { + "InferenceRequestExample1": { + "description": "Run `POST _inference/completion/openai_chat_completions` to perform a completion on the example question.", + "summary": "Completion task", + "value": "{\n \"input\": \"What is Elastic?\"\n}" + }, + "InferenceRequestExample2": { + "description": "Run `POST _inference/rerank/cohere_rerank` to perform reranking on the example input.", + "summary": "Rerank task", + "value": "{\n \"input\": [\"luke\", \"like\", \"leia\", \"chewy\", \"r2d2\", \"star\", \"wars\"],\n \"query\": \"star wars main character\"\n}" + }, + "InferenceRequestExample3": { + "description": "Run `POST _inference/sparse_embedding/my-elser-model` to perform sparse embedding on the example sentence.", + "summary": "Sparse embedding task", + "value": "{\n \"input\": \"The sky above the port was the color of television tuned to a dead channel.\"\n}" + }, + "InferenceRequestExample4": { + "description": "Run `POST _inference/text_embedding/my-cohere-endpoint` to perform text embedding on the example sentence using the Cohere integration.", + "summary": "Text embedding task", + "value": "{\n \"input\": \"The sky above the port was the color of television tuned to a dead channel.\",\n \"task_settings\": {\n \"input_type\": \"ingest\"\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -146779,6 +147908,13 @@ ] }, "description": "Perform streaming inference.\nGet real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation.\nThis API works only with the completion task type.\n\nIMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.\n\nThis API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). 
You must use a client that supports streaming.", + "examples": { + "StreamInferenceRequestExample1": { + "description": "Run `POST _inference/completion/openai-completion/_stream` to perform a completion on the example question with streaming.", + "summary": "Perform a completion task", + "value": "{\n \"input\": \"What is Elastic?\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -153492,6 +154628,12 @@ } } }, + "examples": { + "GetPipelineResponseExample1": { + "description": "A successful response for retrieving information about an ingest pipeline.", + "value": "{\n \"my-pipeline-id\" : {\n \"description\" : \"describe pipeline\",\n \"version\" : 123,\n \"processors\" : [\n {\n \"set\" : {\n \"field\" : \"foo\",\n \"value\" : \"bar\"\n }\n }\n ]\n }\n}" + } + }, "name": { "name": "Response", "namespace": "ingest.get_pipeline" @@ -153842,6 +154984,17 @@ ] }, "description": "Create or update a pipeline.\nChanges made using this API take effect immediately.", + "examples": { + "PutPipelineRequestExample1": { + "summary": "Create an ingest pipeline.", + "value": "{\n \"description\" : \"My optional pipeline description\",\n \"processors\" : [\n {\n \"set\" : {\n \"description\" : \"My optional processor description\",\n \"field\": \"my-keyword-field\",\n \"value\": \"foo\"\n }\n }\n ]\n}" + }, + "PutPipelineRequestExample2": { + "description": "You can use the `_meta` parameter to add arbitrary metadata to a pipeline.", + "summary": "Create an ingest pipeline with metadata.", + "value": "{\n \"description\" : \"My optional pipeline description\",\n \"processors\" : [\n {\n \"set\" : {\n \"description\" : \"My optional processor description\",\n \"field\": \"my-keyword-field\",\n \"value\": \"foo\"\n }\n }\n ],\n \"_meta\": {\n \"reason\": \"set my-keyword-field to foo\",\n \"serialization\": {\n \"class\": \"MyPipeline\",\n \"id\": 10\n }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -153964,6 +155117,13 @@ ] }, "description": "Simulate a pipeline.\n\nRun an ingest pipeline against a set of provided documents.\nYou can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.", + "examples": { + "SimulatePipelineRequestExample1": { + "description": "You can specify the pipeline either in the request body or as a path parameter.", + "summary": "Run an ingest pipeline against a set of provided documents.", + "value": "{\n \"pipeline\" :\n {\n \"description\": \"_description\",\n \"processors\": [\n {\n \"set\" : {\n \"field\" : \"field2\",\n \"value\" : \"_value\"\n }\n }\n ]\n },\n \"docs\": [\n {\n \"_index\": \"index\",\n \"_id\": \"id\",\n \"_source\": {\n \"foo\": \"bar\"\n }\n },\n {\n \"_index\": \"index\",\n \"_id\": \"id\",\n \"_source\": {\n \"foo\": \"rab\"\n }\n }\n ]\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -154629,6 +155789,12 @@ } ] }, + "examples": { + "GetBasicLicenseStatusResponseExample1": { + "description": "A successful response from `GET /_license/basic_status`.", + "value": "{\n \"eligible_to_start_basic\": true\n}" + } + }, "name": { "name": "Response", "namespace": "license.get_basic_status" @@ -154676,6 +155842,12 @@ } ] }, + "examples": { + "GetTrialLicenseStatusResponseExample1": { + "description": "A successful response from `GET /_license/trial_status`.", + "value": "{\n \"eligible_to_start_trial\": true\n}" + } + }, "name": { "name": "Response", "namespace": "license.get_trial_status" @@ -154754,6 +155926,12 @@ ] }, 
"description": "Update the license.\n\nYou can update your license at runtime without shutting down your nodes.\nLicense updates take effect immediately.\nIf the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response.\nYou must then re-submit the API request with the acknowledge parameter set to true.\n\nNOTE: If Elasticsearch security features are enabled and you are installing a gold or higher license, you must enable TLS on the transport networking layer before you install the license.\nIf the operator privileges feature is enabled, only operator users can use this API.", + "examples": { + "PostLicenseRequestExample1": { + "description": "Run `PUT _license` to update to a basic license. NOTE: These values are invalid; you must substitute the appropriate contents from your license file.\n", + "value": "{\n \"licenses\": [\n {\n \"uid\":\"893361dc-9749-4997-93cb-802e3d7fa4xx\",\n \"type\":\"basic\",\n \"issue_date_in_millis\":1411948800000,\n \"expiry_date_in_millis\":1914278399999,\n \"max_nodes\":1,\n \"issued_to\":\"issuedTo\",\n \"issuer\":\"issuer\",\n \"signature\":\"xx\"\n }\n ]\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -154848,6 +156026,12 @@ } ] }, + "examples": { + "PostLicenseResponseExample1": { + "description": "If you update to a basic license and you previously had a license with more features, you receive this type of response. You must re-submit the API request and set the `acknowledge` parameter to `true`.", + "value": "{\n \"acknowledged\": false,\n \"license_status\": \"valid\",\n \"acknowledge\": {\n \"message\": \"\\\"\\\"\\\"This license update requires acknowledgement. To acknowledge the license, please read the following messages and update the license again, this time with the \\\"acknowledge=true\\\" parameter:\\\"\\\"\\\"\",\n \"watcher\": [\n \"Watcher will be disabled\"\n ],\n \"logstash\": [\n \"Logstash will no longer poll for centrally-managed pipelines\"\n ],\n \"security\": [\n \"The following X-Pack security functionality will be disabled ...\"\n ]\n }\n}" + } + }, "name": { "name": "Response", "namespace": "license.post" @@ -155004,6 +156188,12 @@ } ] }, + "examples": { + "StartBasicLicenseResponseExample1": { + "description": "A successful response from `POST /_license/start_basic?acknowledge=true`. 
If you currently have a license with more features than a basic license and you start a basic license, you must pass the acknowledge parameter.", + "value": "{\n \"basic_was_started\": true,\n \"acknowledged\": true\n}" + } + }, "name": { "name": "Response", "namespace": "license.post_start_basic" @@ -155121,6 +156311,12 @@ } ] }, + "examples": { + "StartTrialLicenseResponseExample1": { + "description": "A successful response from `POST /_license/start_trial?acknowledge=true`.", + "value": "{\n \"trial_was_started\": true,\n \"acknowledged\": true\n}" + } + }, "name": { "name": "Response", "namespace": "license.post_start_trial" @@ -155449,6 +156645,12 @@ } } }, + "examples": { + "LogstashGetPipelineResponseExample1": { + "description": "A successful response from `GET _logstash/pipeline/my_pipeline`.\n", + "value": "{\n \"my_pipeline\": {\n \"description\": \"Sample pipeline for illustration purposes\",\n \"last_modified\": \"2021-01-02T02:50:51.250Z\",\n \"pipeline_metadata\": {\n \"type\": \"logstash_pipeline\",\n \"version\": \"1\"\n },\n \"username\": \"elastic\",\n \"pipeline\": \"input {}\\\\n filter { grok {} }\\\\n output {}\",\n \"pipeline_settings\": {\n \"pipeline.workers\": 1,\n \"pipeline.batch.size\": 125,\n \"pipeline.batch.delay\": 50,\n \"queue.type\": \"memory\",\n \"queue.max_bytes\": \"1gb\",\n \"queue.checkpoint.writes\": 1024\n }\n }\n}" + } + }, "name": { "name": "Response", "namespace": "logstash.get_pipeline" @@ -155472,6 +156674,13 @@ } }, "description": "Create or update a Logstash pipeline.\n\nCreate a pipeline that is used for Logstash Central Management.\nIf the specified pipeline exists, it is replaced.", + "examples": { + "LogstashPutPipelineRequestExample1": { + "description": "Run `PUT _logstash/pipeline/my_pipeline` to create a pipeline.", + "summary": "Create a pipeline", + "value": "{\n \"description\": \"Sample pipeline for illustration purposes\",\n \"last_modified\": \"2021-01-02T02:50:51.250Z\",\n \"pipeline_metadata\": {\n \"type\": \"logstash_pipeline\",\n \"version\": 1\n },\n \"username\": \"elastic\",\n \"pipeline\": \"input {}\\\\n filter { grok {} }\\\\n output {}\",\n \"pipeline_settings\": {\n \"pipeline.workers\": 1,\n \"pipeline.batch.size\": 125,\n \"pipeline.batch.delay\": 50,\n \"queue.type\": \"memory\",\n \"queue.max_bytes\": \"1gb\",\n \"queue.checkpoint.writes\": 1024\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -155812,6 +157021,12 @@ } ] }, + "examples": { + "DeprecationInfoResponseExample1": { + "description": "An abbreviated response from `GET /_migration/deprecations`.\n", + "value": "{\n \"cluster_settings\": [\n {\n \"level\": \"critical\",\n \"message\": \"Cluster name cannot contain ':'\",\n \"url\": \"https://www.elastic.co/guide/en/elasticsearch/reference/7.0/breaking-changes-7.0.html#_literal_literal_is_no_longer_allowed_in_cluster_name\",\n \"details\": \"This cluster is named [mycompany:logging], which contains the illegal character ':'.\"\n }\n ],\n \"node_settings\": [],\n \"index_settings\": {\n \"logs:apache\": [\n {\n \"level\": \"warning\",\n \"message\": \"Index name cannot contain ':'\",\n \"url\": \"https://www.elastic.co/guide/en/elasticsearch/reference/7.0/breaking-changes-7.0.html#_literal_literal_is_no_longer_allowed_in_index_name\",\n \"details\": \"This index is named [logs:apache], which contains the illegal character ':'.\"\n }\n ]\n },\n \"ml_settings\": []\n}" + } + }, "name": { "name": "Response", "namespace": "migration.deprecations" @@ -169831,6 +171046,12 @@ ] }, 
"description": "Estimate job model memory usage.\n\nMake an estimation of the memory usage for an anomaly detection job model.\nThe estimate is based on analysis configuration details for the job and cardinality\nestimates for the fields it references.", + "examples": { + "MlEstimateModelMemoryRequestExample1": { + "description": "Run `POST _ml/anomaly_detectors/_estimate_model_memory` to estimate the model memory limit based on the analysis configuration details provided in the request body.", + "value": "{\n \"analysis_config\": {\n \"bucket_span\": \"5m\",\n \"detectors\": [\n {\n \"function\": \"sum\",\n \"field_name\": \"bytes\",\n \"by_field_name\": \"status\",\n \"partition_field_name\": \"app\"\n }\n ],\n \"influencers\": [\n \"source_ip\",\n \"dest_ip\"\n ]\n },\n \"overall_cardinality\": {\n \"status\": 10,\n \"app\": 50\n },\n \"max_bucket_cardinality\": {\n \"source_ip\": 300,\n \"dest_ip\": 30\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -170550,6 +171771,33 @@ ] }, "description": "Evaluate data frame analytics.\n\nThe API packages together commonly used evaluation metrics for various types\nof machine learning features. This has been designed for use on indexes\ncreated by data frame analytics. Evaluation requires both a ground truth\nfield and an analytics result field to be present.", + "examples": { + "MlEvaluateDataFrameRequestExample1": { + "description": "Run `POST _ml/data_frame/_evaluate` to evaluate a a classification job for an annotated index. The `actual_field` contains the ground truth for classification. The `predicted_field` contains the predicted value calculated by the classification analysis.\n", + "summary": "Classification example 1", + "value": "{\n \"index\": \"animal_classification\",\n \"evaluation\": {\n \"classification\": {\n \"actual_field\": \"animal_class\",\n \"predicted_field\": \"ml.animal_class_prediction\",\n \"metrics\": {\n \"multiclass_confusion_matrix\": {}\n }\n }\n }\n}" + }, + "MlEvaluateDataFrameRequestExample2": { + "description": "Run `POST _ml/data_frame/_evaluate` to evaluate a classification job with AUC ROC metrics for an annotated index. The `actual_field` contains the ground truth value for the actual animal classification. This is required in order to evaluate results. The `class_name` specifies the class name that is treated as positive during the evaluation, all the other classes are treated as negative.\n", + "summary": "Classification example 2", + "value": "{\n \"index\": \"animal_classification\",\n \"evaluation\": {\n \"classification\": {\n \"actual_field\": \"animal_class\",\n \"metrics\": {\n \"auc_roc\": {\n \"class_name\": \"dog\"\n }\n }\n }\n }\n}" + }, + "MlEvaluateDataFrameRequestExample3": { + "description": "Run `POST _ml/data_frame/_evaluate` to evaluate an outlier detection job for an annotated index.\n", + "summary": "Outlier detection", + "value": "{\n \"index\": \"my_analytics_dest_index\",\n \"evaluation\": {\n \"outlier_detection\": {\n \"actual_field\": \"is_outlier\",\n \"predicted_probability_field\": \"ml.outlier_score\"\n }\n }\n}" + }, + "MlEvaluateDataFrameRequestExample4": { + "description": "Run `POST _ml/data_frame/_evaluate` to evaluate the testing error of a regression job for an annotated index. The term query in the body limits evaluation to be performed on the test split only. The `actual_field` contains the ground truth for house prices. 
The `predicted_field` contains the house price calculated by the regression analysis.\n", + "summary": "Regression example 1", + "value": "{\n \"index\": \"house_price_predictions\",\n \"query\": {\n \"bool\": {\n \"filter\": [\n {\n \"term\": {\n \"ml.is_training\": false\n }\n }\n ]\n }\n },\n \"evaluation\": {\n \"regression\": {\n \"actual_field\": \"price\",\n \"predicted_field\": \"ml.price_prediction\",\n \"metrics\": {\n \"r_squared\": {},\n \"mse\": {},\n \"msle\": {\n \"offset\": 10\n },\n \"huber\": {\n \"delta\": 1.5\n }\n }\n }\n }\n}" + }, + "MlEvaluateDataFrameRequestExample5": { + "description": "Run `POST _ml/data_frame/_evaluate` to evaluate the training error of a regression job for an annotated index. The term query in the body limits evaluation to be performed on the training split only. The `actual_field` contains the ground truth for house prices. The `predicted_field` contains the house price calculated by the regression analysis.\n", + "summary": "Regression example 2", + "value": "{\n \"index\": \"house_price_predictions\",\n \"query\": {\n \"term\": {\n \"ml.is_training\": {\n \"value\": true\n }\n }\n },\n \"evaluation\": {\n \"regression\": {\n \"actual_field\": \"price\",\n \"predicted_field\": \"ml.price_prediction\",\n \"metrics\": {\n \"r_squared\": {},\n \"mse\": {},\n \"msle\": {},\n \"huber\": {}\n }\n }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -170727,6 +171975,12 @@ ] }, "description": "Explain data frame analytics config.\n\nThis API provides explanations for a data frame analytics config that either\nexists already or one that has not been created yet. The following\nexplanations are provided:\n* which fields are included or not in the analysis and why,\n* how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for model_memory_limit setting later on.\nIf you have object fields or fields that are excluded via source filtering, they are not included in the explanation.", + "examples": { + "MlExplainDataFrameAnalyticsRequestExample1": { + "description": "Run `POST _ml/data_frame/analytics/_explain` to explain a data frame analytics job configuration.", + "value": "{\n \"source\": {\n \"index\": \"houses_sold_last_10_yrs\"\n },\n \"analysis\": {\n \"regression\": {\n \"dependent_variable\": \"price\"\n }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -174940,6 +176194,12 @@ ] }, "description": "Open anomaly detection jobs.\n\nAn anomaly detection job must be opened to be ready to receive and analyze\ndata. It can be opened and closed multiple times throughout its lifecycle.\nWhen you open a new job, it starts with an empty model.\nWhen you open an existing job, the most recent model state is automatically\nloaded. The job is ready to resume its analysis from where it left off, once\nnew data is received.", + "examples": { + "MlOpenJobRequestExample1": { + "description": "A request to open anomaly detection jobs. 
The timeout specifies to wait 35 minutes for the job to open.\n", + "value": "{\n \"timeout\": \"35m\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -175011,6 +176271,12 @@ } ] }, + "examples": { + "MlOpenJobResponseExample1": { + "description": "A successful response when opening an anomaly detection job.", + "value": "{\n \"opened\": true,\n \"node\": \"node-1\"\n}" + } + }, "name": { "name": "Response", "namespace": "ml.open_job" @@ -177099,6 +178365,12 @@ ] }, "description": "Create an anomaly detection job.\n\nIf you include a `datafeed_config`, you must have read index privileges on the source index.\nIf you include a `datafeed_config` but do not provide a query, the datafeed uses `{\"match_all\": {\"boost\": 1}}`.", + "examples": { + "MlPutJobRequestExample1": { + "description": "A request to create an anomaly detection job and datafeed.", + "value": "{\n \"analysis_config\": {\n \"bucket_span\": \"15m\",\n \"detectors\": [\n {\n \"detector_description\": \"Sum of bytes\",\n \"function\": \"sum\",\n \"field_name\": \"bytes\"\n }\n ]\n },\n \"data_description\": {\n \"time_field\": \"timestamp\",\n \"time_format\": \"epoch_ms\"\n },\n \"analysis_limits\": {\n \"model_memory_limit\": \"11MB\"\n },\n \"model_plot_config\": {\n \"enabled\": true,\n \"annotations_enabled\": true\n },\n \"results_index_name\": \"test-job1\",\n \"datafeed_config\": {\n \"indices\": [\n \"kibana_sample_data_logs\"\n ],\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"match_all\": {}\n }\n ]\n }\n },\n \"runtime_mappings\": {\n \"hour_of_day\": {\n \"type\": \"long\",\n \"script\": {\n \"source\": \"emit(doc['timestamp'].value.getHour());\"\n }\n }\n },\n \"datafeed_id\": \"datafeed-test-job1\"\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -177413,6 +178685,12 @@ } ] }, + "examples": { + "MlPutJobResponseExample1": { + "description": "A successful response when creating an anomaly detection job and datafeed.", + "value": "{\n \"job_id\": \"test-job1\",\n \"job_type\": \"anomaly_detector\",\n \"job_version\": \"8.4.0\",\n \"create_time\": 1656087283340,\n \"datafeed_config\": {\n \"datafeed_id\": \"datafeed-test-job1\",\n \"job_id\": \"test-job1\",\n \"authorization\": {\n \"roles\": [\n \"superuser\"\n ]\n },\n \"query_delay\": \"61499ms\",\n \"chunking_config\": {\n \"mode\": \"auto\"\n },\n \"indices_options\": {\n \"expand_wildcards\": [\n \"open\"\n ],\n \"ignore_unavailable\": false,\n \"allow_no_indices\": true,\n \"ignore_throttled\": true\n },\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"match_all\": {}\n }\n ]\n }\n },\n \"indices\": [\n \"kibana_sample_data_logs\"\n ],\n \"scroll_size\": 1000,\n \"delayed_data_check_config\": {\n \"enabled\": true\n },\n \"runtime_mappings\": {\n \"hour_of_day\": {\n \"type\": \"long\",\n \"script\": {\n \"source\": \"emit(doc['timestamp'].value.getHour());\"\n }\n }\n }\n },\n \"analysis_config\": {\n \"bucket_span\": \"15m\",\n \"detectors\": [\n {\n \"detector_description\": \"Sum of bytes\",\n \"function\": \"sum\",\n \"field_name\": \"bytes\",\n \"detector_index\": 0\n }\n ],\n \"influencers\": [],\n \"model_prune_window\": \"30d\"\n },\n \"analysis_limits\": {\n \"model_memory_limit\": \"11mb\",\n \"categorization_examples_limit\": 4\n },\n \"data_description\": {\n \"time_field\": \"timestamp\",\n \"time_format\": \"epoch_ms\"\n },\n \"model_plot_config\": {\n \"enabled\": true,\n \"annotations_enabled\": true\n },\n \"model_snapshot_retention_days\": 10,\n \"daily_model_snapshot_retention_after_days\": 1,\n 
\"results_index_name\": \"custom-test-job1\",\n \"allow_lazy_open\": false\n}" + } + }, "name": { "name": "Response", "namespace": "ml.put_job" @@ -189736,6 +191014,12 @@ } } }, + "examples": { + "nodesInfoResponseExample1": { + "description": "An abbreviated response when requesting cluster nodes information.", + "value": "{\n \"_nodes\": {},\n \"cluster_name\": \"elasticsearch\",\n \"nodes\": {\n \"USpTGYaBSIKbgSUJR2Z9lg\": {\n \"name\": \"node-0\",\n \"transport_address\": \"192.168.17:9300\",\n \"host\": \"node-0.elastic.co\",\n \"ip\": \"192.168.17\",\n \"version\": \"{version}\",\n \"transport_version\": 100000298,\n \"index_version\": 100000074,\n \"component_versions\": {\n \"ml_config_version\": 100000162,\n \"transform_config_version\": 100000096\n },\n \"build_flavor\": \"default\",\n \"build_type\": \"{build_type}\",\n \"build_hash\": \"587409e\",\n \"roles\": [\n \"master\",\n \"data\",\n \"ingest\"\n ],\n \"attributes\": {},\n \"plugins\": [\n {\n \"name\": \"analysis-icu\",\n \"version\": \"{version}\",\n \"description\": \"The ICU Analysis plugin integrates Lucene ICU\n module into elasticsearch, adding ICU relates analysis components.\",\n \"classname\":\n \"org.elasticsearch.plugin.analysis.icu.AnalysisICUPlugin\",\n \"has_native_controller\": false\n }\n ],\n \"modules\": [\n {\n \"name\": \"lang-painless\",\n \"version\": \"{version}\",\n \"description\": \"An easy, safe and fast scripting language for\n Elasticsearch\",\n \"classname\": \"org.elasticsearch.painless.PainlessPlugin\",\n \"has_native_controller\": false\n }\n ]\n }\n }\n}" + } + }, "name": { "name": "Response", "namespace": "nodes.info" @@ -189814,6 +191098,12 @@ ] }, "description": "Reload the keystore on nodes in the cluster.\n\nSecure settings are stored in an on-disk keystore. 
Certain of these settings are reloadable.\nThat is, you can change them on disk and reload them without restarting any nodes in the cluster.\nWhen you have updated reloadable secure settings in your keystore, you can use this API to reload those settings on each node.\n\nWhen the Elasticsearch keystore is password protected and not simply obfuscated, you must provide the password for the keystore when you reload the secure settings.\nReloading the settings for the whole cluster assumes that the keystores for all nodes are protected with the same password; this method is allowed only when inter-node communications are encrypted.\nAlternatively, you can reload the secure settings on each node by locally accessing the API and passing the node-specific Elasticsearch keystore password.", + "examples": { + "ReloadSecureSettingsRequestExample1": { + "description": "Run `POST _nodes/reload_secure_settings` to reload the keystore on nodes in the cluster.", + "value": "{\n \"secure_settings_password\": \"keystore-password\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -191131,6 +192421,12 @@ ] }, "description": "Create or update a query rule.\nCreate or update a query rule within a query ruleset.\n\nIMPORTANT: Due to limitations within pinned queries, you can only pin documents using ids or docs, but cannot use both in single rule.\nIt is advised to use one or the other in query rulesets, to avoid errors.\nAdditionally, pinned queries have a maximum limit of 100 pinned hits.\nIf multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.", + "examples": { + "QueryRulePutRequestExample1": { + "description": "Run `PUT _query_rules/my-ruleset/_rule/my-rule1` to create or update a pinned query rule in the `my-ruleset` ruleset. The rule pins documents with IDs `id1` and `id2` when `user_query` contains `pugs` or `puggles` and `user_country` exactly matches `us`.\n", + "value": "{\n \"type\": \"pinned\",\n \"criteria\": [\n {\n \"type\": \"contains\",\n \"metadata\": \"user_query\",\n \"values\": [ \"pugs\", \"puggles\" ]\n },\n {\n \"type\": \"exact\",\n \"metadata\": \"user_country\",\n \"values\": [ \"us\" ]\n }\n ],\n \"actions\": {\n \"ids\": [\n \"id1\",\n \"id2\"\n ]\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -191231,6 +192527,12 @@ ] }, "description": "Create or update a query ruleset.\nThere is a limit of 100 rules per ruleset.\nThis limit can be increased by using the `xpack.applications.rules.max_rules_per_ruleset` cluster setting.\n\nIMPORTANT: Due to limitations within pinned queries, you can only select documents using `ids` or `docs`, but cannot use both in single rule.\nIt is advised to use one or the other in query rulesets, to avoid errors.\nAdditionally, pinned queries have a maximum limit of 100 pinned hits.\nIf multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.", + "examples": { + "QueryRulesetPutRequestExample1": { + "description": "Run `PUT _query_rules/my-ruleset` to create a new query ruleset. Two rules are associated with `my-ruleset`. `my-rule1` will pin documents with IDs `id1` and `id2` when `user_query` contains `pugs` or `puggles` and `user_country` exactly matches `us`. 
`my-rule2` will exclude documents from different specified indices with IDs `id3` and `id4` when the `query_string` fuzzily matches `rescue dogs`.\n", + "value": "{\n \"rules\": [\n {\n \"rule_id\": \"my-rule1\",\n \"type\": \"pinned\",\n \"criteria\": [\n {\n \"type\": \"contains\",\n \"metadata\": \"user_query\",\n \"values\": [ \"pugs\", \"puggles\" ]\n },\n {\n \"type\": \"exact\",\n \"metadata\": \"user_country\",\n \"values\": [ \"us\" ]\n }\n ],\n \"actions\": {\n \"ids\": [\n \"id1\",\n \"id2\"\n ]\n }\n },\n {\n \"rule_id\": \"my-rule2\",\n \"type\": \"exclude\",\n \"criteria\": [\n {\n \"type\": \"fuzzy\",\n \"metadata\": \"user_query\",\n \"values\": [ \"rescue dogs\" ]\n }\n ],\n \"actions\": {\n \"docs\": [\n {\n \"_index\": \"index1\",\n \"_id\": \"id3\"\n },\n {\n \"_index\": \"index2\",\n \"_id\": \"id4\"\n }\n ]\n }\n }\n ]\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -191346,6 +192648,12 @@ ] }, "description": "Test a query ruleset.\nEvaluate match criteria against a query ruleset to identify the rules that would match that criteria.", + "examples": { + "QueryRulesetTestRequestExample1": { + "description": "Run `POST _query_rules/my-ruleset/_test` to test the `my-ruleset` query ruleset. Provide the match criteria that you want to test against.\n", + "value": "{\n \"match_criteria\": {\n \"query_string\": \"puggles\"\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -192730,6 +194038,12 @@ "version": "8.11.0" }, "description": "Create a rollup job.\n\nWARNING: From 8.15.0, calling this API in a cluster with no rollup usage will fail with a message about the deprecation and planned removal of rollup features. A cluster needs to contain either a rollup job or a rollup index in order for this API to be allowed to run.\n\nThe rollup job configuration contains all the details about how the job should run, when it indexes documents, and what future queries will be able to run against the rollup index.\n\nThere are three main sections to the job configuration: the logistical details about the job (for example, the cron schedule), the fields that are used for grouping, and what metrics to collect for each group.\n\nJobs are created in a `STOPPED` state. You can start them with the start rollup jobs API.", + "examples": { + "CreateRollupJobRequestExample1": { + "description": "Run `PUT _rollup/job/sensor` to create a rollup job that targets the `sensor-*` index pattern. This configuration enables date histograms to be used on the `timestamp` field and terms aggregations to be used on the `node` field. 
This configuration defines metrics over two fields: `temperature` and `voltage`. For the `temperature` field, it collects the `min`, `max`, and `sum` of the temperature. For `voltage`, it collects the `average`.\n", + "value": "{\n \"index_pattern\": \"sensor-*\",\n \"rollup_index\": \"sensor_rollup\",\n \"cron\": \"*/30 * * * * ?\",\n \"page_size\": 1000,\n \"groups\": {\n \"date_histogram\": {\n \"field\": \"timestamp\",\n \"fixed_interval\": \"1h\",\n \"delay\": \"7d\"\n },\n \"terms\": {\n \"fields\": [ \"node\" ]\n }\n },\n \"metrics\": [\n {\n \"field\": \"temperature\",\n \"metrics\": [ \"min\", \"max\", \"sum\" ]\n },\n {\n \"field\": \"voltage\",\n \"metrics\": [ \"avg\" ]\n }\n ]\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -192844,6 +194158,12 @@ "version": "8.11.0" }, "description": "Search rolled-up data.\nThe rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data.\nIt rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query.\n\nThe request body supports a subset of features from the regular search API.\nThe following functionality is not available:\n\n`size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely.\n`highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed.\n\n**Searching both historical rollup and non-rollup data**\n\nThe rollup search API has the capability to search across both \"live\" non-rollup data and the aggregated rollup data.\nThis is done by simply adding the live indices to the URI. 
For example:\n\n```\nGET sensor-1,sensor_rollup/_rollup_search\n{\n \"size\": 0,\n \"aggregations\": {\n \"max_temperature\": {\n \"max\": {\n \"field\": \"temperature\"\n }\n }\n }\n}\n```\n\nThe rollup search endpoint does two things when the search runs:\n\n* The original request is sent to the non-rollup index unaltered.\n* A rewritten version of the original request is sent to the rollup index.\n\nWhen the two responses are received, the endpoint rewrites the rollup response and merges the two together.\nDuring the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used.", + "examples": { + "RollupSearchRequestExample1": { + "description": "Search rolled-up data stored in `sensor_rollup` with `GET /sensor_rollup/_rollup_search`.", + "value": "{\n \"size\": 0,\n \"aggregations\": {\n \"max_temperature\": {\n \"max\": {\n \"field\": \"temperature\"\n }\n }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -193497,6 +194817,12 @@ } } }, + "examples": { + "SearchApplicationGetResponseExample1": { + "description": "A successful response from `GET _application/search_application/my-app`.", + "value": "{\n \"name\": \"my-app\",\n \"indices\": [ \"index1\", \"index2\" ],\n \"updated_at_millis\": 1682105622204,\n \"template\": {\n \"script\": {\n \"source\": {\n \"query\": {\n \"query_string\": {\n \"query\": \"{{query_string}}\",\n \"default_field\": \"{{default_field}}\"\n }\n }\n },\n \"lang\": \"mustache\",\n \"options\": {\n \"content_type\": \"application/json;charset=utf-8\"\n },\n \"params\": {\n \"query_string\": \"*\",\n \"default_field\": \"*\"\n }\n }\n }\n}" + } + }, "name": { "name": "Response", "namespace": "search_application.get" @@ -193565,6 +194891,12 @@ } } }, + "examples": { + "BehavioralAnalyticsGetResponseExample1": { + "description": "A successful response from `GET _application/analytics/my*`.", + "value": "{\n \"my_analytics_collection\": {\n \"event_data_stream\": {\n \"name\": \"behavioral_analytics-events-my_analytics_collection\"\n }\n },\n \"my_analytics_collection2\": {\n \"event_data_stream\": {\n \"name\": \"behavioral_analytics-events-my_analytics_collection2\"\n }\n }\n}" + } + }, "name": { "name": "Response", "namespace": "search_application.get_behavioral_analytics" @@ -193664,6 +194996,12 @@ } ] }, + "examples": { + "SearchApplicationsListResponseExample1": { + "description": "A successful response from `GET _application/search_application?from=0&size=3&q=app*` returns the first three search applications whose names start with `app`.", + "value": "{\n \"count\": 2,\n \"results\": [\n {\n \"name\": \"app-1\",\n \"updated_at_millis\": 1690981129366\n },\n {\n \"name\": \"app-2\",\n \"updated_at_millis\": 1691501823939\n }\n ]\n}" + } + }, "name": { "name": "Response", "namespace": "search_application.list" @@ -193683,6 +195021,12 @@ } }, "description": "Create a behavioral analytics collection event.", + "examples": { + "BehavioralAnalyticsEventPostRequestExample1": { + "description": "Run `POST _application/analytics/my_analytics_collection/event/search_click` to send a `search_click` event to an analytics collection called `my_analytics_collection`.", + "value": "{\n \"session\": {\n \"id\": \"1797ca95-91c9-4e2e-b1bd-9c38e6f386a9\"\n },\n \"user\": {\n \"id\": \"5f26f01a-bbee-4202-9298-81261067abbd\"\n },\n \"search\":{\n \"query\": \"search term\",\n \"results\": {\n \"items\": [\n {\n \"document\": {\n \"id\": \"123\",\n \"index\": \"products\"\n }\n }\n ],\n 
\"total_results\": 10\n },\n \"sort\": {\n \"name\": \"relevance\"\n },\n \"search_application\": \"website\"\n },\n \"document\":{\n \"id\": \"123\",\n \"index\": \"products\"\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -193812,6 +195156,12 @@ } }, "description": "Create or update a search application.", + "examples": { + "SearchApplicationPutRequestExample1": { + "description": "Run `PUT _application/search_application/my-app` to create or update a search application called `my-app`. When the dictionary parameter is specified, the search application search API will perform the following parameter validation: it accepts only the `query_string` and `default_field` parameters; it verifies that `query_string` and `default_field` are both strings; it accepts `default_field` only if it takes the values title or description. If the parameters are not valid, the search application search API will return an error.\n", + "value": "{\n \"indices\": [ \"index1\", \"index2\" ],\n \"template\": {\n \"script\": {\n \"source\": {\n \"query\": {\n \"query_string\": {\n \"query\": \"{{query_string}}\",\n \"default_field\": \"{{default_field}}\"\n }\n }\n },\n \"params\": {\n \"query_string\": \"*\",\n \"default_field\": \"*\"\n }\n },\n \"dictionary\": {\n \"properties\": {\n \"query_string\": {\n \"type\": \"string\"\n },\n \"default_field\": {\n \"type\": \"string\",\n \"enum\": [\n \"title\",\n \"description\"\n ]\n },\n \"additionalProperties\": false\n },\n \"required\": [\n \"query_string\"\n ]\n }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -193988,6 +195338,12 @@ ] }, "description": "Render a search application query.\nGenerate an Elasticsearch query using the specified query parameters and the search template associated with the search application or a default template if none is specified.\nIf a parameter used in the search template is not specified in `params`, the parameter's default value will be used.\nThe API returns the specific Elasticsearch query that would be generated and run by calling the search application search API.\n\nYou must have `read` privileges on the backing alias of the search application.", + "examples": { + "SearchApplicationsRenderQueryRequestExample1": { + "description": "Run `POST _application/search_application/my-app/_render_query` to generate a query for a search application called `my-app` that uses the search template.", + "value": "{\n \"params\": {\n \"query_string\": \"my first query\",\n \"text_fields\": [\n {\n \"name\": \"title\",\n \"boost\": 5\n },\n {\n \"name\": \"description\",\n \"boost\": 1\n }\n ]\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -194021,6 +195377,12 @@ "kind": "properties", "properties": [] }, + "examples": { + "SearchApplicationsRenderQueryResponseExample1": { + "description": "A successful response for generating a query for a search application. 
The `from`, `size`, and `explain` parameters were not specified in the request, so the default values specified in the search template are used.", + "value": "{\n \"from\": 0,\n \"size\": 10,\n \"query\": {\n \"multi_match\": {\n \"query\": \"my first query\",\n \"fields\": [\n \"description^1.0\",\n \"title^5.0\"\n ]\n }\n },\n \"explain\": false\n}" + } + }, "name": { "name": "Response", "namespace": "search_application.render_query" @@ -194057,6 +195419,12 @@ ] }, "description": "Run a search application search.\nGenerate and run an Elasticsearch query that uses the specified query parameteter and the search template associated with the search application or default template.\nUnspecified template parameters are assigned their default values if applicable.", + "examples": { + "SearchApplicationsSearchRequestExample1": { + "description": "Use `POST _application/search_application/my-app/_search` to run a search against a search application called `my-app` that uses a search template.", + "value": "{\n \"params\": {\n \"query_string\": \"my first query\",\n \"text_fields\": [\n {\"name\": \"title\", \"boost\": 5},\n {\"name\": \"description\", \"boost\": 1}\n ]\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -194557,6 +195925,13 @@ ] }, "description": "Mount a snapshot.\nMount a snapshot as a searchable snapshot index.\nDo not use this API for snapshots managed by index lifecycle management (ILM).\nManually mounting ILM-managed snapshots can interfere with ILM processes.", + "examples": { + "SearchableSnapshotsMountSnapshotRequestExample1": { + "description": "Run `POST /_snapshot/my_repository/my_snapshot/_mount?wait_for_completion=true` to mount the index `my_docs` from an existing snapshot named `my_snapshot` stored in `my_repository` as a new index `docs`.\n", + "summary": null, + "value": "{\n \"index\": \"my_docs\",\n \"renamed_index\": \"docs\",\n \"index_settings\": {\n \"index.number_of_replicas\": 0\n },\n \"ignore_index_settings\": [ \"index.refresh_interval\" ]\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -197578,6 +198953,12 @@ ] }, "description": "Activate a user profile.\n\nCreate or update a user profile on behalf of another user.\n\nNOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.\nIndividual users and external applications should not call this API directly.\nThe calling application must have either an `access_token` or a combination of `username` and `password` for the user that the profile document is intended for.\nElastic reserves the right to change or remove this feature in future releases without prior notice.\n\nThis API creates or updates a profile document for end users with information that is extracted from the user's authentication object including `username`, `full_name,` `roles`, and the authentication realm.\nFor example, in the JWT `access_token` case, the profile user's `username` is extracted from the JWT token claim pointed to by the `claims.principal` setting of the JWT realm that authenticated the token.\n\nWhen updating a profile document, the API enables the document if it was disabled.\nAny updates do not change existing content for either the `labels` or `data` fields.", + "examples": { + "RequestExample1": { + "description": "Run `POST /_security/profile/_activate` to activate a user profile.\n", + "value": "{\n \"grant_type\": \"password\",\n \"username\" : \"jacknich\",\n \"password\" : 
\"l0ng-r4nd0m-p@ssw0rd\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -197896,6 +199277,13 @@ ] }, "description": "Bulk delete roles.\n\nThe role management APIs are generally the preferred way to manage roles, rather than using file-based role management.\nThe bulk delete roles API cannot delete roles that are defined in roles files.", + "examples": { + "SecurityBulkDeleteRoleRequestExample1": { + "description": "Run DELETE /_security/role` to delete `my_admin_role` and `my_user_role` roles.\n", + "summary": "Bulk delete example 1", + "value": "{\n \"names\": [\"my_admin_role\", \"my_user_role\"]\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -198012,6 +199400,23 @@ ] }, "description": "Bulk create or update roles.\n\nThe role management APIs are generally the preferred way to manage roles, rather than using file-based role management.\nThe bulk create or update roles API cannot update roles that are defined in roles files.", + "examples": { + "SecurityBulkPutRoleRequestExample1": { + "description": "Run `POST /_security/role` to add roles called `my_admin_role` and `my_user_role`.\n", + "summary": "Bulk role success", + "value": "{\n \"roles\": {\n \"my_admin_role\": {\n \"cluster\": [\n \"all\"\n ],\n \"indices\": [\n {\n \"names\": [\n \"index1\",\n \"index2\"\n ],\n \"privileges\": [\n \"all\"\n ],\n \"field_security\": {\n \"grant\": [\n \"title\",\n \"body\"\n ]\n },\n \"query\": \"{\\\"match\\\": {\\\"title\\\": \\\"foo\\\"}}\"\n }\n ],\n \"applications\": [\n {\n \"application\": \"myapp\",\n \"privileges\": [\n \"admin\",\n \"read\"\n ],\n \"resources\": [\n \"*\"\n ]\n }\n ],\n \"run_as\": [\n \"other_user\"\n ],\n \"metadata\": {\n \"version\": 1\n }\n },\n \"my_user_role\": {\n \"cluster\": [\n \"all\"\n ],\n \"indices\": [\n {\n \"names\": [\n \"index1\"\n ],\n \"privileges\": [\n \"read\"\n ],\n \"field_security\": {\n \"grant\": [\n \"title\",\n \"body\"\n ]\n },\n \"query\": \"{\\\"match\\\": {\\\"title\\\": \\\"foo\\\"}}\"\n }\n ],\n \"applications\": [\n {\n \"application\": \"myapp\",\n \"privileges\": [\n \"admin\",\n \"read\"\n ],\n \"resources\": [\n \"*\"\n ]\n }\n ],\n \"run_as\": [\n \"other_user\"\n ],\n \"metadata\": {\n \"version\": 1\n }\n }\n }\n}" + }, + "SecurityBulkPutRoleRequestExample2": { + "description": "Because errors are handled individually for each role create or update, the API allows partial success. 
For example, `POST /_security/role` would throw an error for `my_admin_role` because the privilege `bad_cluster_privilege` doesn't exist, but would be successful for `my_user_role`.\n", + "summary": "Bulk role errors", + "value": "{\n \"roles\": {\n \"my_admin_role\": {\n \"cluster\": [\n \"bad_cluster_privilege\"\n ],\n \"indices\": [\n {\n \"names\": [\n \"index1\",\n \"index2\"\n ],\n \"privileges\": [\"all\"],\n \"field_security\": {\n \"grant\": [\n \"title\",\n \"body\"\n ]\n },\n \"query\": \"{\\\"match\\\": {\\\"title\\\": \\\"foo\\\"}}\"\n }\n ],\n \"applications\": [\n {\n \"application\": \"myapp\",\n \"privileges\": [\n \"admin\",\n \"read\"\n ],\n \"resources\": [\n \"*\"\n ]\n }\n ],\n \"run_as\": [\n \"other_user\"\n ],\n \"metadata\": {\n \"version\": 1\n }\n },\n \"my_user_role\": {\n \"cluster\": [\n \"all\"\n ],\n \"indices\": [\n {\n \"names\": [\n \"index1\"\n ],\n \"privileges\": [\n \"read\"\n ],\n \"field_security\": {\n \"grant\": [\n \"title\",\n \"body\"\n ]\n },\n \"query\": \"{\\\"match\\\": {\\\"title\\\": \\\"foo\\\"}}\"\n }\n ],\n \"applications\": [\n {\n \"application\": \"myapp\",\n \"privileges\": [\n \"admin\",\n \"read\"\n ],\n \"resources\": [\n \"*\"\n ]\n }\n ],\n \"run_as\": [\n \"other_user\"\n ],\n \"metadata\": {\n \"version\": 1\n }\n }\n }\n}" + }, + "SecurityBulkPutRoleRequestExample3": { + "description": "Run `POST /_security/role` to configure a role, `only_remote_access_role`, with remote indices and remote cluster privileges for a remote cluster.", + "summary": "Bulk role with remote privileges", + "value": "{\n \"roles\": {\n \"only_remote_access_role\": {\n \"remote_indices\": [\n {\n \"clusters\": [\"my_remote\"],\n \"names\": [\"logs*\"],\n \"privileges\": [\"read\", \"read_cross_cluster\", \"view_index_metadata\"]\n }\n ],\n \"remote_cluster\": [\n {\n \"clusters\": [\"my_remote\"],\n \"privileges\": [\"monitor_stats\"]\n }\n ]\n }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -198194,6 +199599,16 @@ ] }, "description": "Bulk update API keys.\nUpdate the attributes for multiple API keys.\n\nIMPORTANT: It is not possible to use an API key as the authentication credential for this API. To update API keys, the owner user's credentials are required.\n\nThis API is similar to the update API key API but enables you to apply the same update to multiple API keys in one API call. This operation can greatly improve performance over making individual updates.\n\nIt is not possible to update expired or invalidated API keys.\n\nThis API supports updates to API key access scope, metadata and expiration.\nThe access scope of each API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request.\nThe snapshot of the owner's permissions is updated automatically on every call.\n\nIMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change an API key's access scope. 
This change can occur if the owner user's permissions have changed since the API key was created or last modified.\n\nA successful request returns a JSON structure that contains the IDs of all updated API keys, the IDs of API keys that already had the requested changes and did not require an update, and error details for any failed update.", + "examples": { + "SecurityBulkUpdateApiKeysRequestExample1": { + "description": "Assign new role descriptors and metadata and update the expiration time for two API keys.", + "value": "{\n \"ids\": [\n \"VuaCfGcBCdbkQm-e5aOx\",\n \"H3_AhoIBA9hmeQJdg7ij\"\n ],\n \"role_descriptors\": {\n \"role-a\": {\n \"indices\": [\n {\n \"names\": [\n \"*\"\n ],\n \"privileges\": [\n \"write\"\n ]\n }\n ]\n }\n },\n \"metadata\": {\n \"environment\": {\n \"level\": 2,\n \"trusted\": true,\n \"tags\": [\n \"production\"\n ]\n }\n },\n \"expiration\": \"30d\"\n}" + }, + "SecurityBulkUpdateApiKeysRequestExample2": { + "description": "Remove the previously assigned permissions for two API keys, making them inherit the owner user's full permissions.", + "value": "{\n \"ids\": [\n \"VuaCfGcBCdbkQm-e5aOx\",\n \"H3_AhoIBA9hmeQJdg7ij\"\n ],\n \"role_descriptors\": {}\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -198254,6 +199669,12 @@ } ] }, + "examples": { + "SecurityBulkUpdateApiKeysResponseExample1": { + "description": "A successful response from updating two API keys.", + "value": "{\n \"updated\": [\n \"VuaCfGcBCdbkQm-e5aOx\",\n \"H3_AhoIBA9hmeQJdg7ij\"\n ],\n \"noops\": []\n}" + } + }, "name": { "name": "Response", "namespace": "security.bulk_update_api_keys" @@ -198295,6 +199716,12 @@ ] }, "description": "Change passwords.\n\nChange the passwords of users in the native realm and built-in users.", + "examples": { + "SecurityChangePasswordRequestExample1": { + "description": "Run `POST /_security/user/jacknich/_password` to update the password for the `jacknich` user.\n", + "value": "{\n \"password\" : \"new-test-password\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -198935,6 +200362,12 @@ ] }, "description": "Create an API key.\n\nCreate an API key for access without requiring basic authentication.\n\nIMPORTANT: If the credential that is used to authenticate this request is an API key, the derived API key cannot have any privileges.\nIf you specify privileges, the API returns an error.\n\nA successful request returns a JSON structure that contains the API key, its unique id, and its name.\nIf applicable, it also returns expiration information for the API key in milliseconds.\n\nNOTE: By default, API keys never expire. You can specify expiration information when you create the API keys.\n\nThe API keys are created by the Elasticsearch API key service, which is automatically enabled.\nTo configure or turn off the API key service, refer to API key service setting documentation.", + "examples": { + "SecurityCreateApiKeyRequestExample1": { + "description": "Run `POST /_security/api_key` to create an API key. If `expiration` is not provided, the API keys do not expire. 
If `role_descriptors` is not provided, the permissions of the authenticated user are applied.\n", + "value": "{\n \"name\": \"my-api-key\",\n \"expiration\": \"1d\", \n \"role_descriptors\": { \n \"role-a\": {\n \"cluster\": [\"all\"],\n \"indices\": [\n {\n \"names\": [\"index-a*\"],\n \"privileges\": [\"read\"]\n }\n ]\n },\n \"role-b\": {\n \"cluster\": [\"all\"],\n \"indices\": [\n {\n \"names\": [\"index-b*\"],\n \"privileges\": [\"all\"]\n }\n ]\n }\n },\n \"metadata\": {\n \"application\": \"my-application\",\n \"environment\": {\n \"level\": 1,\n \"trusted\": true,\n \"tags\": [\"dev\", \"staging\"]\n }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -199100,6 +200533,12 @@ ] }, "description": "Create a cross-cluster API key.\n\nCreate an API key of the `cross_cluster` type for the API key based remote cluster access.\nA `cross_cluster` API key cannot be used to authenticate through the REST interface.\n\nIMPORTANT: To authenticate this request you must use a credential that is not an API key. Even if you use an API key that has the required privilege, the API returns an error.\n\nCross-cluster API keys are created by the Elasticsearch API key service, which is automatically enabled.\n\nNOTE: Unlike REST API keys, a cross-cluster API key does not capture permissions of the authenticated user. The API key’s effective permission is exactly as specified with the `access` property.\n\nA successful request returns a JSON structure that contains the API key, its unique ID, and its name. If applicable, it also returns expiration information for the API key in milliseconds.\n\nBy default, API keys never expire. You can specify expiration information when you create the API keys.\n\nCross-cluster API keys can only be updated with the update cross-cluster API key API.\nAttempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error.", + "examples": { + "CreateCrossClusterApiKeyRequestExample1": { + "description": "Run `POST /_security/cross_cluster/api_key` to create a cross-cluster API key.\n", + "value": "{\n \"name\": \"my-cross-cluster-api-key\",\n \"expiration\": \"1d\", \n \"access\": {\n \"search\": [ \n {\n \"names\": [\"logs*\"]\n }\n ],\n \"replication\": [ \n {\n \"names\": [\"archive*\"]\n }\n ]\n },\n \"metadata\": {\n \"description\": \"phase one\",\n \"environment\": {\n \"level\": 1,\n \"trusted\": true,\n \"tags\": [\"dev\", \"staging\"]\n }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -199585,6 +201024,12 @@ ] }, "description": "Delegate PKI authentication.\n\nThis API implements the exchange of an X509Certificate chain for an Elasticsearch access token.\nThe certificate chain is validated, according to RFC 5280, by sequentially considering the trust configuration of every installed PKI realm that has `delegation.enabled` set to `true`.\nA successfully trusted client certificate is also subject to the validation of the subject distinguished name according to thw `username_pattern` of the respective realm.\n\nThis API is called by smart and trusted proxies, such as Kibana, which terminate the user's TLS session but still want to authenticate the user by using a PKI realm—-​as if the user connected directly to Elasticsearch.\n\nIMPORTANT: The association between the subject public key in the target certificate and the corresponding private key is not validated.\nThis is part of the TLS authentication process and it is delegated to the proxy that calls this API.\nThe proxy is 
trusted to have performed the TLS authentication and this API translates that authentication into an Elasticsearch access token.", + "examples": { + "SecurityDelegatePkiRequestExample1": { + "description": "Delegate a one element certificate chain.", + "value": "{\n\"x509_certificate_chain\": [\"MIIDeDCCAmCgAwIBAgIUBzj/nGGKxP2iXawsSquHmQjCJmMwDQYJKoZIhvcNAQELBQAwUzErMCkGA1UEAxMiRWxhc3RpY3NlYXJjaCBUZXN0IEludGVybWVkaWF0ZSBDQTEWMBQGA1UECxMNRWxhc3RpY3NlYXJjaDEMMAoGA1UEChMDb3JnMB4XDTIzMDcxODE5MjkwNloXDTQzMDcxMzE5MjkwNlowSjEiMCAGA1UEAxMZRWxhc3RpY3NlYXJjaCBUZXN0IENsaWVudDEWMBQGA1UECxMNRWxhc3RpY3NlYXJjaDEMMAoGA1UEChMDb3JnMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAllHL4pQkkfwAm/oLkxYYO+r950DEy1bjH+4viCHzNADLCTWO+lOZJVlNx7QEzJE3QGMdif9CCBBxQFMapA7oUFCLq84fPSQQu5AnvvbltVD9nwVtCs+9ZGDjMKsz98RhSLMFIkxdxi6HkQ3Lfa4ZSI4lvba4oo+T/GveazBDS+NgmKyq00EOXt3tWi1G9vEVItommzXWfv0agJWzVnLMldwkPqsw0W7zrpyT7FZS4iLbQADGceOW8fiauOGMkscu9zAnDR/SbWl/chYioQOdw6ndFLn1YIFPd37xL0WsdsldTpn0vH3YfzgLMffT/3P6YlwBegWzsx6FnM/93Ecb4wIDAQABo00wSzAJBgNVHRMEAjAAMB0GA1UdDgQWBBQKNRwjW+Ad/FN1Rpoqme/5+jrFWzAfBgNVHSMEGDAWgBRcya0c0x/PaI7MbmJVIylWgLqXNjANBgkqhkiG9w0BAQsFAAOCAQEACZ3PF7Uqu47lplXHP6YlzYL2jL0D28hpj5lGtdha4Muw1m/BjDb0Pu8l0NQ1z3AP6AVcvjNDkQq6Y5jeSz0bwQlealQpYfo7EMXjOidrft1GbqOMFmTBLpLA9SvwYGobSTXWTkJzonqVaTcf80HpMgM2uEhodwTcvz6v1WEfeT/HMjmdIsq4ImrOL9RNrcZG6nWfw0HR3JNOgrbfyEztEI471jHznZ336OEcyX7gQuvHE8tOv5+oD1d7s3Xg1yuFp+Ynh+FfOi3hPCuaHA+7F6fLmzMDLVUBAllugst1C3U+L/paD7tqIa4ka+KNPCbSfwazmJrt4XNiivPR4hwH5g==\"]\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -199653,6 +201098,12 @@ } ] }, + "examples": { + "SecurityDelegatePkiResponseExample1": { + "description": "A successful response from delegating a one element certificate chain.", + "value": "{\n \"access_token\": \"dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==\",\n \"type\": \"Bearer\",\n \"expires_in\": 1200,\n \"authentication\": {\n \"username\": \"Elasticsearch Test Client\",\n \"roles\": [],\n \"full_name\": null,\n \"email\": null,\n \"metadata\": {\n \"pki_dn\": \"O=org, OU=Elasticsearch, CN=Elasticsearch Test Client\",\n \"pki_delegated_by_user\": \"test_admin\",\n \"pki_delegated_by_realm\": \"file\"\n },\n \"enabled\": true,\n \"authentication_realm\": {\n \"name\": \"pki1\",\n \"type\": \"pki\"\n },\n \"lookup_realm\": {\n \"name\": \"pki1\",\n \"type\": \"pki\"\n },\n \"authentication_type\": \"realm\"\n }\n}" + } + }, "name": { "name": "Response", "namespace": "security.delegate_pki" @@ -201806,6 +203257,18 @@ ] }, "description": "Get a token.\n\nCreate a bearer token for access without requiring basic authentication.\nThe tokens are created by the Elasticsearch Token Service, which is automatically enabled when you configure TLS on the HTTP interface.\nAlternatively, you can explicitly enable the `xpack.security.authc.token.enabled` setting.\nWhen you are running in production mode, a bootstrap check prevents you from enabling the token service unless you also enable TLS on the HTTP interface.\n\nThe get token API takes the same parameters as a typical OAuth 2.0 token API except for the use of a JSON request body.\n\nA successful get token API call returns a JSON structure that contains the access token, the amount of time (seconds) that the token expires in, the type, and the scope if available.\n\nThe tokens returned by the get token API have a finite period of time for which they are valid and after that time period, they can no longer be used.\nThat time period is defined by the 
`xpack.security.authc.token.timeout` setting.\nIf you want to invalidate a token immediately, you can do so by using the invalidate token API.", + "examples": { + "GetUserAccessTokenRequestExample1": { + "description": "Run `POST /_security/oauth2/token` to obtain a token using the `client_credentials` grant type, which simply creates a token as the authenticated user.\n", + "summary": "A client_credentials grant type example", + "value": "{\n \"grant_type\" : \"client_credentials\"\n}" + }, + "GetUserAccessTokenRequestExample2": { + "description": "Run `POST /_security/oauth2/token` to obtain a token for the `test_admin` user using the password grant type. This request needs to be made by an authenticated user with sufficient privileges that may or may not be the same as the one whose username is passed in the `username` parameter.\n", + "summary": "A password grant type example", + "value": "{\n \"grant_type\" : \"password\",\n \"username\" : \"test_admin\",\n \"password\" : \"x-pack-test-password\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -202550,6 +204013,18 @@ ] }, "description": "Grant an API key.\n\nCreate an API key on behalf of another user.\nThis API is similar to the create API keys API, however it creates the API key for a user that is different than the user that runs the API.\nThe caller must have authentication credentials for the user on whose behalf the API key will be created.\nIt is not possible to use this API to create an API key without that user's credentials.\nThe supported user authentication credential types are:\n\n* username and password\n* Elasticsearch access tokens\n* JWTs\n\nThe user, for whom the authentication credentials is provided, can optionally \"run as\" (impersonate) another user.\nIn this case, the API key will be created on behalf of the impersonated user.\n\nThis API is intended be used by applications that need to create and manage API keys for end users, but cannot guarantee that those users have permission to create API keys on their own behalf.\nThe API keys are created by the Elasticsearch API key service, which is automatically enabled.\n\nA successful grant API key API call returns a JSON structure that contains the API key, its unique id, and its name.\nIf applicable, it also returns expiration information for the API key in milliseconds.\n\nBy default, API keys never expire. You can specify expiration information when you create the API keys.", + "examples": { + "SecurityGrantApiKeyRequestExample1": { + "description": "Run `POST /_security/api_key/grant` to create an API key on behalf of the `test_admin` user.\n", + "summary": "Grant an API key", + "value": "{\n \"grant_type\": \"password\",\n \"username\" : \"test_admin\",\n \"password\" : \"x-pack-test-password\",\n \"api_key\" : {\n \"name\": \"my-api-key\",\n \"expiration\": \"1d\",\n \"role_descriptors\": {\n \"role-a\": {\n \"cluster\": [\"all\"],\n \"indices\": [\n {\n \"names\": [\"index-a*\"],\n \"privileges\": [\"read\"]\n }\n ]\n },\n \"role-b\": {\n \"cluster\": [\"all\"],\n \"indices\": [\n {\n \"names\": [\"index-b*\"],\n \"privileges\": [\"all\"]\n }\n ]\n }\n },\n \"metadata\": {\n \"application\": \"my-application\",\n \"environment\": {\n \"level\": 1,\n \"trusted\": true,\n \"tags\": [\"dev\", \"staging\"]\n }\n }\n }\n}" + }, + "SecurityGrantApiKeyRequestExample2": { + "description": "Run `POST /_security/api_key/grant`. The user (`test_admin`) whose credentials are provided can \"run as\" another user (`test_user`). 
The API key will be granted to the impersonated user (`test_user`).\n", + "summary": "Grant an API key with run_as", + "value": "{\n \"grant_type\": \"password\",\n \"username\" : \"test_admin\", \n \"password\" : \"x-pack-test-password\", \n \"run_as\": \"test_user\", \n \"api_key\" : {\n \"name\": \"another-api-key\"\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -202848,6 +204323,12 @@ ] }, "description": "Check user privileges.\n\nDetermine whether the specified user has a specified list of privileges.\nAll users can use this API, but only to determine their own privileges.\nTo check the privileges of other users, you must use the run as feature.", + "examples": { + "SecurityHasPrivilegesRequestExample1": { + "description": "Run `GET /_security/user/_has_privileges` to check whether the current user has a specific set of cluster, index, and application privileges.", + "value": "{\n \"cluster\": [ \"monitor\", \"manage\" ],\n \"index\" : [\n {\n \"names\": [ \"suppliers\", \"products\" ],\n \"privileges\": [ \"read\" ]\n },\n {\n \"names\": [ \"inventory\" ],\n \"privileges\" : [ \"read\", \"write\" ]\n }\n ],\n \"application\": [\n {\n \"application\": \"inventory_manager\",\n \"privileges\" : [ \"read\", \"data:write/inventory\" ],\n \"resources\" : [ \"product/1852563\" ]\n }\n ]\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -203125,6 +204606,12 @@ ] }, "description": "Check user profile privileges.\n\nDetermine whether the users associated with the specified user profile IDs have all the requested privileges.\n\nNOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly.\nElastic reserves the right to change or remove this feature in future releases without prior notice.", + "examples": { + "RequestExample1": { + "description": "Run `POST /_security/profile/_has_privileges` to check whether the two users associated with the specified profiles have all of the requested cluster, index, and application privileges.\n", + "value": "{\n \"uids\": [\n \"u_LQPnxDxEjIH0GOUoFkZr5Y57YUwSkL9Joiq-g4OCbPc_0\",\n \"u_rzRnxDgEHIH0GOUoFkZr5Y27YUwSk19Joiq=g4OCxxB_1\",\n \"u_does-not-exist_0\"\n ],\n \"privileges\": {\n \"cluster\": [ \"monitor\", \"create_snapshot\", \"manage_ml\" ],\n \"index\" : [\n {\n \"names\": [ \"suppliers\", \"products\" ],\n \"privileges\": [ \"create_doc\"]\n },\n {\n \"names\": [ \"inventory\" ],\n \"privileges\" : [ \"read\", \"write\" ]\n }\n ],\n \"application\": [\n {\n \"application\": \"inventory_manager\",\n \"privileges\" : [ \"read\", \"data:write/inventory\" ],\n \"resources\" : [ \"product/1852563\" ]\n }\n ]\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -203265,6 +204752,38 @@ ] }, "description": "Invalidate API keys.\n\nThis API invalidates API keys created by the create API key or grant API key APIs.\nInvalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically deleted.\n\nTo use this API, you must have at least the `manage_security`, `manage_api_key`, or `manage_own_api_key` cluster privileges.\nThe `manage_security` privilege allows deleting any API key, including both REST and cross cluster API keys.\nThe `manage_api_key` privilege allows deleting any REST API key, but not cross 
cluster API keys.\nThe `manage_own_api_key` only allows deleting REST API keys that are owned by the user.\nIn addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats:\n\n- Set the parameter `owner=true`.\n- Or, set both `username` and `realm_name` to match the user's identity.\n- Or, if the request is issued by an API key, that is to say an API key invalidates itself, specify its ID in the `ids` field.", + "examples": { + "SecurityInvalidateApiKeyRequestExample1": { + "description": "Run `DELETE /_security/api_key` to invalidate the API keys identified by ID.", + "summary": "API keys by ID", + "value": "{\n \"ids\" : [ \"VuaCfGcBCdbkQm-e5aOx\" ]\n}" + }, + "SecurityInvalidateApiKeyRequestExample2": { + "description": "Run `DELETE /_security/api_key` to invalidate the API keys identified by name.", + "summary": "API keys by name", + "value": "{\n \"name\" : \"my-api-key\"\n}" + }, + "SecurityInvalidateApiKeyRequestExample3": { + "description": "Run `DELETE /_security/api_key` to invalidate all API keys for the `native1` realm.", + "summary": "API keys by realm", + "value": "{\n \"realm_name\" : \"native1\"\n}" + }, + "SecurityInvalidateApiKeyRequestExample4": { + "description": "Run `DELETE /_security/api_key` to invalidate all API keys for the user `myuser` in all realms.", + "summary": "API keys by user", + "value": "{\n \"username\" : \"myuser\"\n}" + }, + "SecurityInvalidateApiKeyRequestExample5": { + "description": "Run `DELETE /_security/api_key` to invalidate the API keys identified by ID if they are owned by the currently authenticated user.", + "summary": "API keys by ID and owner", + "value": "{\n \"ids\" : [\"VuaCfGcBCdbkQm-e5aOx\"],\n \"owner\" : \"true\"\n}" + }, + "SecurityInvalidateApiKeyRequestExample6": { + "description": "Run `DELETE /_security/api_key` to invalidate all API keys for the user `myuser` in the `native1` realm.", + "summary": "API keys by user and realm", + "value": "{\n \"username\" : \"myuser\",\n \"realm_name\" : \"native1\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -203408,6 +204927,33 @@ ] }, "description": "Invalidate a token.\n\nThe access tokens returned by the get token API have a finite period of time for which they are valid.\nAfter that time period, they can no longer be used.\nThe time period is defined by the `xpack.security.authc.token.timeout` setting.\n\nThe refresh tokens returned by the get token API are only valid for 24 hours.\nThey can also be used exactly once.\nIf you want to invalidate one or more access or refresh tokens immediately, use this invalidate token API.\n\nNOTE: While all parameters are optional, at least one of them is required.\nMore specifically, either one of `token` or `refresh_token` parameters is required.\nIf none of these two are specified, then `realm_name` and/or `username` need to be specified.", + "examples": { + "SecurityInvalidateTokenRequestExample1": { + "description": "Run `DELETE /_security/oauth2/token` to invalidate an access token.\n", + "summary": "Invalidate an access token", + "value": "{\n \"token\" : \"dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==\"\n}" + }, + "SecurityInvalidateTokenRequestExample2": { + "description": "Run `DELETE /_security/oauth2/token` to invalidate a refresh token.\n", + "summary": "Invalidate a refresh token", + "value": "{\n \"refresh_token\" : \"vLBPvmAB6KvwvJZr27cS\"\n}" + }, + "SecurityInvalidateTokenRequestExample3": { + 
"description": "Run `DELETE /_security/oauth2/token` to invalidate all access tokens and refresh tokens for the `saml1` realm.", + "summary": "Invalidate tokens by realm", + "value": "{\n \"realm_name\" : \"saml1\"\n}" + }, + "SecurityInvalidateTokenRequestExample4": { + "description": "Run `DELETE /_security/oauth2/token` to invalidate all access tokens and refresh tokens for the user `myuser` in all realms.", + "summary": "Invalidate tokens by user", + "value": "{\n \"username\" : \"myuser\"\n}" + }, + "SecurityInvalidateTokenRequestExample5": { + "description": "Run `DELETE /_security/oauth2/token` to invalidate all access tokens and refresh tokens for the user `myuser` in the `saml1` realm.", + "summary": "Invalidate tokens by user and realm", + "value": "{\n \"username\" : \"myuser\",\n \"realm_name\" : \"saml1\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -203545,6 +205091,12 @@ ] }, "description": "Authenticate OpenID Connect.\n\nExchange an OpenID Connect authentication response message for an Elasticsearch internal access token and refresh token that can be subsequently used for authentication.\n\nElasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs.\nThese APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.", + "examples": { + "RequestExample1": { + "description": "Run `POST /_security/oidc/authenticate` to exchange the response that was returned from the OpenID Connect Provider after a successful authentication for an Elasticsearch access token and refresh token. This example is from an authentication that uses the authorization code grant flow.\n", + "value": "{\n \"redirect_uri\" : \"https://oidc-kibana.elastic.co:5603/api/security/oidc/callback?code=jtI3Ntt8v3_XvcLzCFGq&state=4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I\",\n \"state\" : \"4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I\",\n \"nonce\" : \"WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM\",\n \"realm\" : \"oidc1\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -203655,6 +205207,12 @@ ] }, "description": "Logout of OpenID Connect.\n\nInvalidate an access token and a refresh token that were generated as a response to the `/_security/oidc/authenticate` API.\n\nIf the OpenID Connect authentication realm in Elasticsearch is accordingly configured, the response to this call will contain a URI pointing to the end session endpoint of the OpenID Connect Provider in order to perform single logout.\n\nElasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs.\nThese APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.", + "examples": { + "RequestExample1": { + "description": "Run `POST /_security/oidc/logout` to perform the logout.", + "value": "{\n \"token\" : \"dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==\",\n \"refresh_token\": \"vLBPvmAB6KvwvJZr27cS\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -203765,6 +205323,23 @@ ] }, "description": "Prepare OpenID connect authentication.\n\nCreate an oAuth 2.0 authentication request as a URL string based on the configuration of the OpenID Connect authentication realm in Elasticsearch.\n\nThe response of this API is a URL pointing to the Authorization 
Endpoint of the configured OpenID Connect Provider, which can be used to redirect the browser of the user in order to continue the authentication process.\n\nElasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs.\nThese APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.", + "examples": { + "RequestExample1": { + "description": "Run `POST /_security/oidc/prepare` to generate an authentication request for the OpenID Connect Realm `oidc1`.\n", + "summary": "Prepare with realm", + "value": "{\n \"realm\" : \"oidc1\"\n}" + }, + "RequestExample2": { + "description": "Run `POST /_security/oidc/prepare` to generate an authentication request for the OpenID Connect Realm `oidc1`, where the values for the `state` and the `nonce` have been generated by the client.\n", + "summary": "Prepare with realm, state, and nonce", + "value": "{\n \"realm\" : \"oidc1\",\n \"state\" : \"lGYK0EcSLjqH6pkT5EVZjC6eIW5YCGgywj2sxROO\",\n \"nonce\" : \"zOBXLJGUooRrbLbQk5YCcyC8AXw3iloynvluYhZ5\"\n}" + }, + "RequestExample3": { + "description": "Run `POST /_security/oidc/prepare` to generate an authentication request for a third party initiated single sign on. Specify the issuer that should be used for matching the appropriate OpenID Connect Authentication realm.\n", + "summary": "Prepare by issuer", + "value": "{\n \"iss\" : \"http://127.0.0.1:8080\",\n \"login_hint\": \"this_is_an_opaque_string\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -203933,6 +205508,18 @@ } }, "description": "Create or update application privileges.\n\nTo use this API, you must have one of the following privileges:\n\n* The `manage_security` cluster privilege (or a greater privilege such as `all`).\n* The \"Manage Application Privileges\" global privilege for the application being referenced in the request.\n\nApplication names are formed from a prefix, with an optional suffix that conform to the following rules:\n\n* The prefix must begin with a lowercase ASCII letter.\n* The prefix must contain only ASCII letters or digits.\n* The prefix must be at least 3 characters long.\n* If the suffix exists, it must begin with either a dash `-` or `_`.\n* The suffix cannot contain any of the following characters: `\\`, `/`, `*`, `?`, `\"`, `<`, `>`, `|`, `,`.\n* No part of the name can contain whitespace.\n\nPrivilege names must begin with a lowercase ASCII letter and must contain only ASCII letters and digits along with the characters `_`, `-`, and `.`.\n\nAction names can contain any number of printable ASCII characters and must contain at least one of the following characters: `/`, `*`, `:`.", + "examples": { + "SecurityPutPrivilegesRequestExample1": { + "description": "Run `PUT /_security/privilege` to add a single application privilege. The wildcard (`*`) means that this privilege grants access to all actions that start with `data:read/`. Elasticsearch does not assign any meaning to these actions. 
However, if the request includes an application privilege such as `data:read/users` or `data:read/settings`, the has privileges API respects the use of a wildcard and returns `true`.\n", + "summary": "Add a privilege", + "value": "{\n \"myapp\": {\n \"read\": {\n \"actions\": [ \n \"data:read/*\" , \n \"action:login\" ],\n \"metadata\": { \n \"description\": \"Read access to myapp\"\n }\n }\n }\n}" + }, + "SecurityPutPrivilegesRequestExample2": { + "description": "Run `PUT /_security/privilege` to add multiple application privileges.\n", + "summary": "Add multiple privileges", + "value": "{\n \"app01\": {\n \"read\": {\n \"actions\": [ \"action:login\", \"data:read/*\" ]\n },\n \"write\": {\n \"actions\": [ \"action:login\", \"data:write/*\" ]\n }\n },\n \"app02\": {\n \"all\": {\n \"actions\": [ \"*\" ]\n }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -204178,6 +205765,23 @@ ] }, "description": "Create or update roles.\n\nThe role management APIs are generally the preferred way to manage roles in the native realm, rather than using file-based role management.\nThe create or update roles API cannot update roles that are defined in roles files.\nFile-based role management is not available in Elastic Serverless.", + "examples": { + "SecurityPutRoleRequestExample1": { + "description": "Run `POST /_security/role/my_admin_role` to create a role.", + "summary": "Role example 1", + "value": "{\n \"description\": \"Grants full access to all management features within the cluster.\",\n \"cluster\": [\"all\"],\n \"indices\": [\n {\n \"names\": [ \"index1\", \"index2\" ],\n \"privileges\": [\"all\"],\n \"field_security\" : { // optional\n \"grant\" : [ \"title\", \"body\" ]\n },\n \"query\": \"{\\\"match\\\": {\\\"title\\\": \\\"foo\\\"}}\" // optional\n }\n ],\n \"applications\": [\n {\n \"application\": \"myapp\",\n \"privileges\": [ \"admin\", \"read\" ],\n \"resources\": [ \"*\" ]\n }\n ],\n \"run_as\": [ \"other_user\" ], // optional\n \"metadata\" : { // optional\n \"version\" : 1\n }\n}" + }, + "SecurityPutRoleRequestExample2": { + "description": "Run `POST /_security/role/cli_or_drivers_minimal` to configure a role that can run SQL in JDBC.", + "summary": "Role example 2", + "value": "{\n \"cluster\": [\"cluster:monitor/main\"],\n \"indices\": [\n {\n \"names\": [\"test\"],\n \"privileges\": [\"read\", \"indices:admin/get\"]\n }\n ]\n}" + }, + "SecurityPutRoleRequestExample3": { + "description": "Run `POST /_security/role/only_remote_access_role` to configure a role with remote indices and remote cluster privileges for a remote cluster.", + "summary": "Role example 3", + "value": "{\n \"remote_indices\": [\n {\n \"clusters\": [\"my_remote\"], \n \"names\": [\"logs*\"], \n \"privileges\": [\"read\", \"read_cross_cluster\", \"view_index_metadata\"] \n }\n ],\n \"remote_cluster\": [\n {\n \"clusters\": [\"my_remote\"], \n \"privileges\": [\"monitor_stats\"] \n }\n ]\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -204334,6 +205938,53 @@ ] }, "description": "Create or update role mappings.\n\nRole mappings define which roles are assigned to each user.\nEach mapping has rules that identify users and a list of roles that are granted to those users.\nThe role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The create or update role mappings API cannot update role mappings that are defined in role mapping files.\n\nNOTE: This API does not create roles. 
Rather, it maps users to existing roles.\nRoles can be created by using the create or update roles API or roles files.\n\n**Role templates**\n\nThe most common use for role mappings is to create a mapping from a known value on the user to a fixed role name.\nFor example, all users in the `cn=admin,dc=example,dc=com` LDAP group should be given the superuser role in Elasticsearch.\nThe `roles` field is used for this purpose.\n\nFor more complex needs, it is possible to use Mustache templates to dynamically determine the names of the roles that should be granted to the user.\nThe `role_templates` field is used for this purpose.\n\nNOTE: To use role templates successfully, the relevant scripting feature must be enabled.\nOtherwise, all attempts to create a role mapping with role templates fail.\n\nAll of the user fields that are available in the role mapping rules are also available in the role templates.\nThus it is possible to assign a user to a role that reflects their username, their groups, or the name of the realm to which they authenticated.\n\nBy default a template is evaluated to produce a single string that is the name of the role which should be assigned to the user.\nIf the format of the template is set to \"json\" then the template is expected to produce a JSON string or an array of JSON strings for the role names.", + "examples": { + "SecurityPutRoleMappingRequestExample1": { + "description": "Run `POST /_security/role_mapping/mapping1` to assign the `user` role to all users.\n", + "summary": "Roles for all users", + "value": "{\n \"roles\": [ \"user\"],\n \"enabled\": true, \n \"rules\": {\n \"field\" : { \"username\" : \"*\" }\n },\n \"metadata\" : { \n \"version\" : 1\n }\n}" + }, + "SecurityPutRoleMappingRequestExample2": { + "description": "Run `POST /_security/role_mapping/mapping2` to assign the \"user\" and \"admin\" roles to specific users.\n", + "summary": "Roles for specific users", + "value": "{\n \"roles\": [ \"user\", \"admin\" ],\n \"enabled\": true,\n \"rules\": {\n \"field\" : { \"username\" : [ \"esadmin01\", \"esadmin02\" ] }\n }\n}" + }, + "SecurityPutRoleMappingRequestExample3": { + "description": "Run `POST /_security/role_mapping/mapping3` to match users who authenticated against a specific realm.\n", + "summary": "Roles for specific realms", + "value": "{\n \"roles\": [ \"ldap-user\" ],\n \"enabled\": true,\n \"rules\": {\n \"field\" : { \"realm.name\" : \"ldap1\" }\n }\n}" + }, + "SecurityPutRoleMappingRequestExample4": { + "description": "Run `POST /_security/role_mapping/mapping4` to match any user where either the username is `esadmin` or the user is in the `cn=admins,dc=example,dc=com` group. This example is useful when the group names in your identity management system (such as Active Directory, or a SAML Identity Provider) do not have a one-to-one correspondence with the names of roles in Elasticsearch. The role mapping is the means by which you link a group name with a role name.\n", + "summary": "Roles for specific groups", + "value": "{\n \"roles\": [ \"superuser\" ],\n \"enabled\": true,\n \"rules\": {\n \"any\": [\n {\n \"field\": {\n \"username\": \"esadmin\"\n }\n },\n {\n \"field\": {\n \"groups\": \"cn=admins,dc=example,dc=com\"\n }\n }\n ]\n }\n}" + }, + "SecurityPutRoleMappingRequestExample5": { + "description": "Run `POST /_security/role_mapping/mapping5` to use an array syntax for the groups field when there are multiple groups. 
This pattern matches any of the groups (rather than all of the groups).\n", + "summary": "Roles for multiple groups", + "value": "{\n \"role_templates\": [\n {\n \"template\": { \"source\": \"{{#tojson}}groups{{/tojson}}\" }, \n \"format\" : \"json\" \n }\n ],\n \"rules\": {\n \"field\" : { \"realm.name\" : \"saml1\" }\n },\n \"enabled\": true\n}" + }, + "SecurityPutRoleMappingRequestExample6": { + "description": "Run `POST /_security/role_mapping/mapping6` for rare cases when the names of your groups may be an exact match for the names of your Elasticsearch roles. This can be the case when your SAML Identity Provider includes its own \"group mapping\" feature and can be configured to release Elasticsearch role names in the user's SAML attributes. In these cases it is possible to use a template that treats the group names as role names.\nNOTE: This should only be done if you intend to define roles for all of the provided groups. Mapping a user to a large number of unnecessary or undefined roles is inefficient and can have a negative effect on system performance. If you only need to map a subset of the groups, you should do it by using explicit mappings.\nThe `tojson` mustache function is used to convert the list of group names into a valid JSON array. Because the template produces a JSON array, the `format` must be set to `json`.\n", + "summary": "Templated roles for groups", + "value": "{\n \"role_templates\": [\n {\n \"template\": { \"source\": \"{{#tojson}}groups{{/tojson}}\" }, \n \"format\" : \"json\" \n }\n ],\n \"rules\": {\n \"field\" : { \"realm.name\" : \"saml1\" }\n },\n \"enabled\": true\n}" + }, + "SecurityPutRoleMappingRequestExample7": { + "description": "Run `POST /_security/role_mapping/mapping7` to match users within a particular LDAP sub-tree in a specific realm.\n", + "summary": "Users in an LDAP sub-tree and realm", + "value": "{\n \"roles\": [ \"ldap-example-user\" ],\n \"enabled\": true,\n \"rules\": {\n \"all\": [\n { \"field\" : { \"dn\" : \"*,ou=subtree,dc=example,dc=com\" } },\n { \"field\" : { \"realm.name\" : \"ldap1\" } }\n ]\n }\n}" + }, + "SecurityPutRoleMappingRequestExample8": { + "description": "Run `POST /_security/role_mapping/mapping8` to assign rules that are complex and include wildcard matching. For example, this mapping matches any user where all of these conditions are met: the Distinguished Name matches the pattern `*,ou=admin,dc=example,dc=com`, or the `username` is `es-admin`, or the `username` is `es-system`; the user is in the `cn=people,dc=example,dc=com` group; the user does not have a `terminated_date`.\n", + "summary": "Complex roles", + "value": "{\n \"roles\": [ \"superuser\" ],\n \"enabled\": true,\n \"rules\": {\n \"all\": [\n {\n \"any\": [\n {\n \"field\": {\n \"dn\": \"*,ou=admin,dc=example,dc=com\"\n }\n },\n {\n \"field\": {\n \"username\": [ \"es-admin\", \"es-system\" ]\n }\n }\n ]\n },\n {\n \"field\": {\n \"groups\": \"cn=people,dc=example,dc=com\"\n }\n },\n {\n \"except\": {\n \"field\": {\n \"metadata.terminated_date\": null\n }\n }\n }\n ]\n }\n}" + }, + "SecurityPutRoleMappingRequestExample9": { + "description": "Run `POST /_security/role_mapping/mapping9` to use templated roles to automatically map every user to their own custom role. In this example every user who authenticates using the `cloud-saml` realm will be automatically mapped to two roles: the `saml_user` role and a role that is their username prefixed with `_user_`. 
For example, the user `nwong` would be assigned the `saml_user` and `_user_nwong` roles.\n", + "summary": "Templated roles", + "value": "{\n \"rules\": { \"field\": { \"realm.name\": \"cloud-saml\" } },\n \"role_templates\": [\n { \"template\": { \"source\" : \"saml_user\" } }, \n { \"template\": { \"source\" : \"_user_{{username}}\" } }\n ],\n \"enabled\": true\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -204545,6 +206196,12 @@ ] }, "description": "Create or update users.\n\nAdd and update users in the native realm.\nA password is required for adding a new user but is optional when updating an existing user.\nTo change a user's password without updating any other fields, use the change password API.", + "examples": { + "SecurityPutUserRequestExample1": { + "description": "Run `POST /_security/user/jacknich` to add a user.", + "value": "{\n \"password\" : \"l0ng-r4nd0m-p@ssw0rd\",\n \"roles\" : [ \"admin\", \"other_role1\" ],\n \"full_name\" : \"Jack Nicholson\",\n \"email\" : \"jacknich@example.com\",\n \"metadata\" : {\n \"intelligence\" : 7\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -205294,6 +206951,23 @@ ] }, "description": "Find API keys with a query.\n\nGet a paginated list of API keys and their information.\nYou can optionally filter the results with a query.\n\nTo use this API, you must have at least the `manage_own_api_key` or the `read_security` cluster privileges.\nIf you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own.\nIf you have the `read_security`, `manage_api_key`, or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership.", + "examples": { + "QueryApiKeysRequestExample1": { + "description": "Run `GET /_security/_query/api_key?with_limited_by=true` to retrieve an API key by ID.", + "summary": "Query API keys by ID", + "value": "{\n \"query\": {\n \"ids\": {\n \"values\": [\n \"VuaCfGcBCdbkQm-e5aOx\"\n ]\n }\n }\n}" + }, + "QueryApiKeysRequestExample2": { + "description": "Run `GET /_security/_query/api_key`. Use a `bool` query to issue complex logical conditions and use `from`, `size`, and `sort` to help paginate the result. For example, the API key name must begin with `app1-key-` and must not be `app1-key-01`. It must be owned by a username with the wildcard pattern `org-*-user` and the `environment` metadata field must have a `production` value. The offset to begin the search result is the twentieth (zero-based index) API key. The page size of the response is 10 API keys. 
The result is first sorted by creation date in descending order, then by name in ascending order.\n", + "summary": "Query API keys with pagination", + "value": "{\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"prefix\": {\n \"name\": \"app1-key-\" \n }\n },\n {\n \"term\": {\n \"invalidated\": \"false\" \n }\n }\n ],\n \"must_not\": [\n {\n \"term\": {\n \"name\": \"app1-key-01\" \n }\n }\n ],\n \"filter\": [\n {\n \"wildcard\": {\n \"username\": \"org-*-user\" \n }\n },\n {\n \"term\": {\n \"metadata.environment\": \"production\" \n }\n }\n ]\n }\n },\n \"from\": 20, \n \"size\": 10, \n \"sort\": [ \n { \"creation\": { \"order\": \"desc\", \"format\": \"date_time\" } },\n \"name\"\n ]\n}" + }, + "QueryApiKeysRequestExample3": { + "description": "Run `GET /_security/_query/api_key` to retrieve the API key by name.", + "summary": "Query API keys by name", + "value": "{\n \"query\": {\n \"term\": {\n \"name\": {\n \"value\": \"application-key-1\"\n }\n }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -205556,6 +207230,18 @@ ] }, "description": "Find roles with a query.\n\nGet roles in a paginated manner.\nThe role management APIs are generally the preferred way to manage roles, rather than using file-based role management.\nThe query roles API does not retrieve roles that are defined in roles files, nor built-in ones.\nYou can optionally filter the results with a query.\nAlso, the results can be paginated and sorted.", + "examples": { + "QueryRolesRequestExample1": { + "description": "Run `POST /_security/_query/role` to list all roles, sorted by the role name.", + "summary": "Query roles by name", + "value": "{\n \"sort\": [\"name\"]\n}" + }, + "QueryRolesRequestExample2": { + "description": "Run `POST /_security/_query/role` to query only the user access role, given its description. It returns only the best matching role because `size` is set to `1`.\n", + "summary": "Query roles by description", + "value": "{\n \"query\": {\n \"match\": {\n \"description\": {\n \"query\": \"user access\"\n }\n }\n },\n \"size\": 1 \n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -205949,6 +207635,18 @@ ] }, "description": "Find users with a query.\n\nGet information for users in a paginated manner.\nYou can optionally filter the results with a query.\n\nNOTE: As opposed to the get user API, built-in users are excluded from the result.\nThis API is only for native users.", + "examples": { + "SecurityQueryUserRequestExample1": { + "description": "Run `POST /_security/_query/user?with_profile_uid=true` to get users that have roles that are prefixed with `other`. It will also include the user `profile_uid` in the response.\n", + "summary": "Query users by role prefix", + "value": "{\n \"query\": {\n \"prefix\": {\n \"roles\": \"other\"\n }\n }\n}" + }, + "SecurityQueryUserRequestExample2": { + "description": "Run `POST /_security/_query/user`. Use a `bool` query to issue complex logical conditions: The `email` must end with `example.com`. The user must be enabled. The result will be filtered to only contain users with at least one role that contains the substring `other`. The offset to begin the search result is the second (zero-based index) user. The page size of the response is two users. 
The result is sorted by `username` in descending order.\n", + "summary": "Query users with multiple conditions", + "value": "{\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"wildcard\": {\n \"email\": \"*example.com\" \n }\n },\n {\n \"term\": {\n \"enabled\": true \n }\n }\n ],\n \"filter\": [\n {\n \"wildcard\": {\n \"roles\": \"*other*\" \n }\n }\n ]\n }\n },\n \"from\": 1, \n \"size\": 2, \n \"sort\": [\n { \"username\": { \"order\": \"desc\"} } \n ]\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -206299,6 +207997,12 @@ ] }, "description": "Authenticate SAML.\n\nSubmit a SAML response message to Elasticsearch for consumption.\n\nNOTE: This API is intended for use by custom web applications other than Kibana.\nIf you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.\n\nThe SAML message that is submitted can be:\n\n* A response to a SAML authentication request that was previously created using the SAML prepare authentication API.\n* An unsolicited SAML message in the case of an IdP-initiated single sign-on (SSO) flow.\n\nIn either case, the SAML message needs to be a base64 encoded XML document with a root element of `<Response>`.\n\nAfter successful validation, Elasticsearch responds with an Elasticsearch internal access token and refresh token that can be subsequently used for authentication.\nThis API endpoint essentially exchanges SAML responses that indicate successful authentication in the IdP for Elasticsearch access and refresh tokens, which can be used for authentication against Elasticsearch.", + "examples": { + "RequestExample1": { + "description": "Run `POST /_security/saml/authenticate` to exchange a SAML Response indicating a successful authentication at the SAML IdP for an Elasticsearch access token and refresh token to be used in subsequent requests.\n", + "value": "{\n \"content\" : \"PHNhbWxwOlJlc3BvbnNlIHhtbG5zOnNhbWxwPSJ1cm46b2FzaXM6bmFtZXM6dGM6U0FNTDoyLjA6cHJvdG9jb2wiIHhtbG5zOnNhbWw9InVybjpvYXNpczpuYW1lczp0YzpTQU1MOjIuMD.....\",\n \"ids\" : [\"4fee3b046395c4e751011e97f8900b5273d56685\"]\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -206445,6 +208149,18 @@ ] }, "description": "Logout of SAML completely.\n\nVerifies the logout response sent from the SAML IdP.\n\nNOTE: This API is intended for use by custom web applications other than Kibana.\nIf you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.\n\nThe SAML IdP may send a logout response back to the SP after handling the SP-initiated SAML Single Logout.\nThis API verifies the response by ensuring the content is relevant and validating its signature.\nAn empty response is returned if the verification process is successful.\nThe response can be sent by the IdP with either the HTTP-Redirect or the HTTP-Post binding.\nThe caller of this API must prepare the request accordingly so that this API can handle either of them.", + "examples": { + "RequestExample1": { + "description": "Run `POST /_security/saml/complete_logout` to verify the logout response sent by the SAML IdP using the HTTP-Redirect binding.\n", + "summary": "HTTP-Redirect binding", + "value": "{\n \"realm\": \"saml1\",\n \"ids\": [ \"_1c368075e0b3...\" ],\n \"query_string\": \"SAMLResponse=fZHLasMwEEVbfb1bf...&SigAlg=http%3A%2F%2Fwww.w3.org%2F2000%2F09%2Fxmldsig%23rsa-sha1&Signature=CuCmFn%2BLqnaZGZJqK...\"\n}" + }, + "RequestExample2": { + "description": "Run `POST /_security/saml/complete_logout` to verify the logout response 
sent by the SAML IdP using the HTTP-Post binding.\n", + "summary": "HTTP-Post binding", + "value": "{\n \"realm\": \"saml1\",\n \"ids\": [ \"_1c368075e0b3...\" ],\n \"content\": \"PHNhbWxwOkxvZ291dFJlc3BvbnNlIHhtbG5zOnNhbWxwPSJ1cm46...\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -206517,6 +208233,12 @@ ] }, "description": "Invalidate SAML.\n\nSubmit a SAML LogoutRequest message to Elasticsearch for consumption.\n\nNOTE: This API is intended for use by custom web applications other than Kibana.\nIf you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.\n\nThe logout request comes from the SAML IdP during an IdP initiated Single Logout.\nThe custom web application can use this API to have Elasticsearch process the `LogoutRequest`.\nAfter successful validation of the request, Elasticsearch invalidates the access token and refresh token that corresponds to that specific SAML principal and provides a URL that contains a SAML LogoutResponse message.\nThus the user can be redirected back to their IdP.", + "examples": { + "RequestExample1": { + "description": "Run `POST /_security/saml/invalidate` to invalidate all the tokens for realm `saml1` pertaining to the user that is identified in the SAML Logout Request.\n", + "value": "{\n \"query_string\" : \"SAMLRequest=nZFda4MwFIb%2FiuS%2BmviRpqFaClKQdbvo2g12M2KMraCJ9cRR9utnW4Wyi13sMie873MeznJ1aWrnS3VQGR0j4mLkKC1NUeljjA77zYyhVbIE0dR%2By7fmaHq7U%2BdegXWGpAZ%2B%2F4pR32luBFTAtWgUcCv56%2Fp5y30X87Yz1khTIycdgpUW9kY7WdsC9zxoXTvMvWuVV98YyMnSGH2SYE5pwALBIr9QKiwDGpW0oGVUznGeMyJZKFkQ4jBf5HnhUymjIhzCAL3KNFihbYx8TBYzzGaY7EnIyZwHzCWMfiDnbRIftkSjJr%2BFu0e9v%2B0EgOquRiiZjKpiVFp6j50T4WXoyNJ%2FEWC9fdqc1t%2F1%2B2F3aUpjzhPiXpqMz1%2FHSn4A&SigAlg=http%3A%2F%2Fwww.w3.org%2F2001%2F04%2Fxmldsig-more%23rsa-sha256&Signature=MsAYz2NFdovMG2mXf6TSpu5vlQQyEJAg%2B4KCwBqJTmrb3yGXKUtIgvjqf88eCAK32v3eN8vupjPC8LglYmke1ZnjK0%2FKxzkvSjTVA7mMQe2AQdKbkyC038zzRq%2FYHcjFDE%2Bz0qISwSHZY2NyLePmwU7SexEXnIz37jKC6NMEhus%3D\",\n \"realm\" : \"saml1\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -206615,6 +208337,12 @@ ] }, "description": "Logout of SAML.\n\nSubmits a request to invalidate an access token and refresh token.\n\nNOTE: This API is intended for use by custom web applications other than Kibana.\nIf you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.\n\nThis API invalidates the tokens that were generated for a user by the SAML authenticate API.\nIf the SAML realm in Elasticsearch is configured accordingly and the SAML IdP supports this, the Elasticsearch response contains a URL to redirect the user to the IdP that contains a SAML logout request (starting an SP-initiated SAML Single Logout).", + "examples": { + "RequestExample1": { + "description": "Run `POST /_security/saml/logout` to invalidate the pair of tokens that were generated by calling the SAML authenticate API with a successful SAML response.\n", + "value": "{\n \"token\" : \"46ToAxZVaXVVZTVKOVF5YU04ZFJVUDVSZlV3\",\n \"refresh_token\" : \"mJdXLtmvTUSpoLwMvdBt_w\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -206701,6 +208429,18 @@ ] }, "description": "Prepare SAML authentication.\n\nCreate a SAML authentication request (`<AuthnRequest>`) as a URL string based on the configuration of the respective SAML realm in Elasticsearch.\n\nNOTE: This API is intended for use by custom web applications other than Kibana.\nIf you are using Kibana, refer to the documentation for configuring SAML 
single-sign-on on the Elastic Stack.\n\nThis API returns a URL pointing to the SAML Identity Provider.\nYou can use the URL to redirect the browser of the user in order to continue the authentication process.\nThe URL includes a single parameter named `SAMLRequest`, which contains a SAML Authentication request that is deflated and Base64 encoded.\nIf the configuration dictates that SAML authentication requests should be signed, the URL has two extra parameters named `SigAlg` and `Signature`.\nThese parameters contain the algorithm used for the signature and the signature value itself.\nIt also returns a random string that uniquely identifies this SAML Authentication request.\nThe caller of this API needs to store this identifier as it needs to be used in a following step of the authentication process.", + "examples": { + "RequestExample1": { + "description": "Run `POST /_security/saml/prepare` to generate a SAML authentication request for the SAML realm named `saml1`.\n", + "summary": "Prepare with a realm", + "value": "{\n \"realm\" : \"saml1\"\n}" + }, + "RequestExample2": { + "description": "Run `POST /_security/saml/prepare` to generate a SAML authentication request for the SAML realm with an Assertion Consumer Service (ACS) URL.\n", + "summary": "Prepare with an ACS", + "value": "{\n \"acs\" : \"https://kibana.org/api/security/saml/callback\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -206963,6 +208703,12 @@ ] }, "description": "Suggest a user profile.\n\nGet suggestions for user profiles that match specified search criteria.\n\nNOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.\nIndividual users and external applications should not call this API directly.\nElastic reserves the right to change or remove this feature in future releases without prior notice.", + "examples": { + "RequestExample1": { + "description": "Run `POST /_security/profile/_suggest` to get suggestions for profile documents with name-related fields matching `jack`. It specifies both `uids` and `labels` hints for better relevance. The `labels` hint ranks profiles higher if their `direction` label matches either `north` or `east`.\n", + "value": "{\n \"name\": \"jack\", \n \"hint\": {\n \"uids\": [ \n \"u_8RKO7AKfEbSiIHZkZZ2LJy2MUSDPWDr3tMI_CkIGApU_0\",\n \"u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0\"\n ],\n \"labels\": {\n \"direction\": [\"north\", \"east\"] \n }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -207147,6 +208893,18 @@ ] }, "description": "Update an API key.\n\nUpdate attributes of an existing API key.\nThis API supports updates to an API key's access scope, expiration, and metadata.\n\nTo use this API, you must have at least the `manage_own_api_key` cluster privilege.\nUsers can only update API keys that they created or that were granted to them.\nTo update another user’s API key, use the `run_as` feature to submit a request on behalf of another user.\n\nIMPORTANT: It's not possible to use an API key as the authentication credential for this API. 
The owner user’s credentials are required.\n\nUse this API to update API keys created by the create API key or grant API Key APIs.\nIf you need to apply the same update to many API keys, you can use the bulk update API keys API to reduce overhead.\nIt's not possible to update expired API keys or API keys that have been invalidated by the invalidate API key API.\n\nThe access scope of an API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request.\nThe snapshot of the owner's permissions is updated automatically on every call.\n\nIMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change the API key's access scope.\nThis change can occur if the owner user's permissions have changed since the API key was created or last modified.", + "examples": { + "RequestExample1": { + "description": "Run `PUT /_security/api_key/VuaCfGcBCdbkQm-e5aOx` to assign new role descriptors and metadata to an API key.\n", + "summary": "Update role and metadata", + "value": "{\n \"role_descriptors\": {\n \"role-a\": {\n \"indices\": [\n {\n \"names\": [\"*\"],\n \"privileges\": [\"write\"]\n }\n ]\n }\n },\n \"metadata\": {\n \"environment\": {\n \"level\": 2,\n \"trusted\": true,\n \"tags\": [\"production\"]\n }\n }\n}" + }, + "RequestExample2": { + "description": "Run `PUT /_security/api_key/VuaCfGcBCdbkQm-e5aOx` to remove the API key's previously assigned permissions. It will inherit the owner user's full permissions.\n", + "summary": "Remove permissions", + "value": "{\n \"role_descriptors\": {}\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -207246,6 +209004,12 @@ ] }, "description": "Update a cross-cluster API key.\n\nUpdate the attributes of an existing cross-cluster API key, which is used for API key based remote cluster access.\n\nTo use this API, you must have at least the `manage_security` cluster privilege.\nUsers can only update API keys that they created.\nTo update another user's API key, use the `run_as` feature to submit a request on behalf of another user.\n\nIMPORTANT: It's not possible to use an API key as the authentication credential for this API.\nTo update an API key, the owner user's credentials are required.\n\nIt's not possible to update expired API keys, or API keys that have been invalidated by the invalidate API key API.\n\nThis API supports updates to an API key's access scope, metadata, and expiration.\nThe owner user's information, such as the `username` and `realm`, is also updated automatically on every call.\n\nNOTE: This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API.", + "examples": { + "UpdateCrossClusterApiKeyRequestExample1": { + "description": "Run `PUT /_security/cross_cluster/api_key/VuaCfGcBCdbkQm-e5aOx` to update a cross-cluster API key, assigning it new access scope and metadata.\n", + "value": "{\n \"access\": {\n \"replication\": [\n {\n \"names\": [\"archive\"]\n }\n ]\n },\n \"metadata\": {\n \"application\": \"replication\"\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -207345,6 +209109,12 @@ ] }, "description": "Update security index settings.\n\nUpdate the user-configurable settings for the security internal index (`.security` and associated indices). Only a subset of settings are allowed to be modified. 
This includes `index.auto_expand_replicas` and `index.number_of_replicas`.\n\nNOTE: If `index.auto_expand_replicas` is set, `index.number_of_replicas` will be ignored during updates.\n\nIf a specific index is not in use on the system and settings are provided for it, the request will be rejected.\nThis API does not yet support configuring the settings for indices before they are in use.", + "examples": { + "SecurityUpdateSettingsRequestExample1": { + "description": "Run `PUT /_security/settings` to modify the security settings.", + "value": "{\n \"security\": {\n \"index.auto_expand_replicas\": \"0-all\"\n },\n \"security-tokens\": {\n \"index.auto_expand_replicas\": \"0-all\"\n },\n \"security-profile\": {\n \"index.auto_expand_replicas\": \"0-all\"\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -207457,6 +209227,12 @@ ] }, "description": "Update user profile data.\n\nUpdate specific data for the user profile that is associated with a unique ID.\n\nNOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.\nIndividual users and external applications should not call this API directly.\nElastic reserves the right to change or remove this feature in future releases without prior notice.\n\nTo use this API, you must have one of the following privileges:\n\n* The `manage_user_profile` cluster privilege.\n* The `update_profile_data` global privilege for the namespaces that are referenced in the request.\n\nThis API updates the `labels` and `data` fields of an existing user profile document with JSON objects.\nNew keys and their values are added to the profile document and conflicting keys are replaced by data that's included in the request.\n\nFor both labels and data, content is namespaced by the top-level fields.\nThe `update_profile_data` global privilege grants privileges for updating only the allowed namespaces.", + "examples": { + "RequestExample1": { + "description": "Run `POST /_security/profile/u_P_0BMHgaOK3p7k-PFWUCbw9dQ-UFjt01oWJ_Dp2PmPc_0/_data` to update a profile document for the `u_P_0BMHgaOK3p7k-PFWUCbw9dQ-UFjt01oWJ_Dp2PmPc_0` user profile.\n", + "value": "{\n \"labels\": {\n \"direction\": \"east\"\n },\n \"data\": {\n \"app1\": {\n \"theme\": \"default\"\n }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -207984,6 +209760,12 @@ ] }, "description": "Prepare a node to be shut down.\n\nNOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.\n\nIf you specify a node that is offline, it will be prepared for shut down when it rejoins the cluster.\n\nIf the operator privileges feature is enabled, you must be an operator to use this API.\n\nThe API migrates ongoing tasks and index shards to other nodes as needed to prepare a node to be restarted or shut down and removed from the cluster.\nThis ensures that Elasticsearch can be stopped safely with minimal disruption to the cluster.\n\nYou must specify the type of shutdown: `restart`, `remove`, or `replace`.\nIf a node is already being prepared for shutdown, you can use this API to change the shutdown type.\n\nIMPORTANT: This API does NOT terminate the Elasticsearch process.\nMonitor the node shutdown status to determine when it is safe to stop Elasticsearch.", + "examples": { + "ShutdownPutNodeRequestExample1": { + "description": "Register a node for shutdown with `PUT /_nodes/USpTGYaBSIKbgSUJR2Z9lg/shutdown`. 
The `restart` type prepares the node to be restarted.\n", + "value": "{\n \"type\": \"restart\",\n \"reason\": \"Demonstrating how the node shutdown API works\",\n \"allocation_delay\": \"20m\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -208321,6 +210103,28 @@ ] }, "description": "Simulate data ingestion.\nRun ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index.\n\nThis API is meant to be used for troubleshooting or pipeline development, as it does not actually index any data into Elasticsearch.\n\nThe API runs the default and final pipeline for that index against a set of documents provided in the body of the request.\nIf a pipeline contains a reroute processor, it follows that reroute processor to the new index, running that index's pipelines as well, in the same way that a non-simulated ingest would.\nNo data is indexed into Elasticsearch.\nInstead, the transformed document is returned, along with the list of pipelines that have been run and the name of the index where the document would have been indexed if this were not a simulation.\nThe transformed document is validated against the mappings that would apply to this index, and any validation error is reported in the result.\n\nThis API differs from the simulate pipeline API in that you specify a single pipeline for that API, and it runs only that one pipeline.\nThe simulate pipeline API is more useful for developing a single pipeline, while the simulate ingest API is more useful for troubleshooting the interaction of the various pipelines that get applied when ingesting into an index.\n\nBy default, the pipeline definitions that are currently in the system are used.\nHowever, you can supply substitute pipeline definitions in the body of the request.\nThese will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request.", + "examples": { + "SimulateIngestRequestExample1": { + "description": "In this example the index `my-index` has a default pipeline called `my-pipeline` and a final pipeline called `my-final-pipeline`. Since both documents are being ingested into `my-index`, both pipelines are run using the pipeline definitions that are already in the system.", + "summary": "Existing pipeline definitions", + "value": "{\n \"docs\": [\n {\n \"_id\": 123,\n \"_index\": \"my-index\",\n \"_source\": {\n \"foo\": \"bar\"\n }\n },\n {\n \"_id\": 456,\n \"_index\": \"my-index\",\n \"_source\": {\n \"foo\": \"rab\"\n }\n }\n ]\n}" + }, + "SimulateIngestRequestExample2": { + "description": "In this example the index `my-index` has a default pipeline called `my-pipeline` and a final pipeline called `my-final-pipeline`. But a substitute definition of `my-pipeline` is provided in `pipeline_substitutions`. 
The substitute `my-pipeline` will be used in place of the `my-pipeline` that is in the system, and then the `my-final-pipeline` that is already defined in the system will run.", + "summary": "Pipeline substitutions", + "value": "{\n \"docs\": [\n {\n \"_index\": \"my-index\",\n \"_id\": 123,\n \"_source\": {\n \"foo\": \"bar\"\n }\n },\n {\n \"_index\": \"my-index\",\n \"_id\": 456,\n \"_source\": {\n \"foo\": \"rab\"\n }\n }\n ],\n \"pipeline_substitutions\": {\n \"my-pipeline\": {\n \"processors\": [\n {\n \"uppercase\": {\n \"field\": \"foo\"\n }\n }\n ]\n }\n }\n}" + }, + "SimulateIngestRequestExample3": { + "description": "In this example, imagine that the index `my-index` has a strict mapping with only the `foo` keyword field defined. Say that field mapping came from a component template named `my-mappings_template`. You want to test adding a new field, `bar`. So a substitute definition of `my-mappings_template` is provided in `component_template_substitutions`. The substitute `my-mappings_template` will be used in place of the existing mapping for `my-index` and in place of the `my-mappings_template` that is in the system.\n", + "summary": "Component template substitutions", + "value": "{\n \"docs\": [\n {\n \"_index\": \"my-index\",\n \"_id\": \"123\",\n \"_source\": {\n \"foo\": \"foo\"\n }\n },\n {\n \"_index\": \"my-index\",\n \"_id\": \"456\",\n \"_source\": {\n \"bar\": \"rab\"\n }\n }\n ],\n \"component_template_substitutions\": {\n \"my-mappings_template\": {\n \"template\": {\n \"mappings\": {\n \"dynamic\": \"strict\",\n \"properties\": {\n \"foo\": {\n \"type\": \"keyword\"\n },\n \"bar\": {\n \"type\": \"keyword\"\n }\n }\n }\n }\n }\n }\n}" + }, + "SimulateIngestRequestExample4": { + "description": "The pipeline, component template, and index template substitutions replace the existing pipeline details for the duration of this request.", + "summary": "Multiple substitutions", + "value": "{\n \"docs\": [\n {\n \"_id\": \"id\",\n \"_index\": \"my-index\",\n \"_source\": {\n \"foo\": \"bar\"\n }\n },\n {\n \"_id\": \"id\",\n \"_index\": \"my-index\",\n \"_source\": {\n \"foo\": \"rab\"\n }\n }\n ],\n \"pipeline_substitutions\": {\n \"my-pipeline\": {\n \"processors\": [\n {\n \"set\": {\n \"field\": \"field3\",\n \"value\": \"value3\"\n }\n }\n ]\n }\n },\n \"component_template_substitutions\": {\n \"my-component-template\": {\n \"template\": {\n \"mappings\": {\n \"dynamic\": true,\n \"properties\": {\n \"field3\": {\n \"type\": \"keyword\"\n }\n }\n },\n \"settings\": {\n \"index\": {\n \"default_pipeline\": \"my-pipeline\"\n }\n }\n }\n }\n },\n \"index_template_substitutions\": {\n \"my-index-template\": {\n \"index_patterns\": [\n \"my-index-*\"\n ],\n \"composed_of\": [\n \"component_template_1\",\n \"component_template_2\"\n ]\n }\n },\n \"mapping_addition\": {\n \"dynamic\": \"strict\",\n \"properties\": {\n \"foo\": {\n \"type\": \"keyword\"\n }\n }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -209643,6 +211447,18 @@ ] }, "description": "Create or update a policy.\nCreate or update a snapshot lifecycle policy.\nIf the policy already exists, this request increments the policy version.\nOnly the latest version of a policy is stored.", + "examples": { + "PutSnapshotLifecycleRequestExample1": { + "description": "Run `PUT /_slm/policy/daily-snapshots` to create a lifecycle policy. The `schedule` is when the snapshot should be taken, in this case, 1:30am daily. 
The `retention` details specify to: keep snapshots for 30 days; always keep at least 5 successful snapshots, even if they're more than 30 days old; keep no more than 50 successful snapshots, even if they're less than 30 days old.\n", + "summary": "Create a policy", + "value": "{\n \"schedule\": \"0 30 1 * * ?\",\n \"name\": \"\",\n \"repository\": \"my_repository\",\n \"config\": {\n \"indices\": [\"data-*\", \"important\"],\n \"ignore_unavailable\": false,\n \"include_global_state\": false\n },\n \"retention\": {\n \"expire_after\": \"30d\",\n \"min_count\": 5,\n \"max_count\": 50\n }\n}" + }, + "PutSnapshotLifecycleRequestExample2": { + "description": "Run `PUT /_slm/policy/hourly-snapshots` to create a lifecycle policy that uses interval scheduling. It creates a snapshot once every hour. The first snapshot will be created one hour after the policy is modified, with subsequent snapshots every hour afterward.\n", + "summary": "Create a policy with intervals", + "value": "{\n \"schedule\": \"1h\",\n \"name\": \"\",\n \"repository\": \"my_repository\",\n \"config\": {\n \"indices\": [\"data-*\", \"important\"]\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -212016,6 +213832,12 @@ ] }, "description": "Clone a snapshot.\nClone part or all of a snapshot into another snapshot in the same repository.", + "examples": { + "SnapshotCloneRequestExample1": { + "description": "Run `PUT /_snapshot/my_repository/source_snapshot/_clone/target_snapshot` to clone the `source_snapshot` into a new `target_snapshot`.", + "value": "{\n \"indices\": \"index_a,index_b\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -212216,6 +214038,12 @@ ] }, "description": "Create a snapshot.\nTake a snapshot of a cluster or of data streams and indices.", + "examples": { + "SnapshotCreateRequestExample1": { + "description": "Run `PUT /_snapshot/my_repository/snapshot_2?wait_for_completion=true` to take a snapshot of `index_1` and `index_2`.", + "value": "{\n \"indices\": \"index_1,index_2\",\n \"ignore_unavailable\": true,\n \"include_global_state\": false,\n \"metadata\": {\n \"taken_by\": \"user123\",\n \"taken_because\": \"backup before upgrading\"\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -212342,6 +214170,38 @@ } }, "description": "Create or update a snapshot repository.\nIMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters.\nTo register a snapshot repository, the cluster's global metadata must be writable.\nEnsure there are no cluster blocks (for example, `cluster.blocks.read_only` and `cluster.blocks.read_only_allow_delete` settings) that prevent write access.\n\nSeveral options for this API can be specified using a query parameter or a request body parameter.\nIf both parameters are specified, only the query parameter is used.", + "examples": { + "SnapshotCreateRepositoryRequestExample1": { + "description": "Run `PUT /_snapshot/my_repository` to create or update a shared file system snapshot repository.", + "summary": "A shared file system repository", + "value": "{\n \"type\": \"fs\",\n \"settings\": {\n \"location\": \"my_backup_location\"\n }\n}" + }, + "SnapshotCreateRepositoryRequestExample2": { + "description": "Run `PUT /_snapshot/my_repository` to create or update an Azure snapshot repository.", + "summary": "An Azure repository", + "value": "{\n \"type\": \"azure\",\n \"settings\": {\n \"client\": \"secondary\"\n }\n}" + }, + "SnapshotCreateRepositoryRequestExample3": { + 
"description": "Run `PUT /_snapshot/my_gcs_repository` to create or update a Google Cloud Storage snapshot repository.", + "summary": "A Google Cloud Storage repository", + "value": "{\n \"type\": \"gcs\",\n \"settings\": {\n \"bucket\": \"my_other_bucket\",\n \"base_path\": \"dev\"\n }\n}" + }, + "SnapshotCreateRepositoryRequestExample4": { + "description": "Run `PUT /_snapshot/my_s3_repository` to create or update an AWS S3 snapshot repository.", + "summary": "An S3 repository", + "value": "{\n \"type\": \"s3\",\n \"settings\": {\n \"bucket\": \"my-bucket\"\n }\n}" + }, + "SnapshotCreateRepositoryRequestExample5": { + "description": "Run `PUT _snapshot/my_src_only_repository` to create or update a source-only snapshot repository.", + "summary": "A source-only repository", + "value": "{\n \"type\": \"source\",\n \"settings\": {\n \"delegate_type\": \"fs\",\n \"location\": \"my_backup_repository\"\n }\n}" + }, + "SnapshotCreateRepositoryRequestExample6": { + "description": "Run `PUT _snapshot/my_read_only_url_repository` to create or update a read-only URL snapshot repository.", + "summary": "A read-only URL repository", + "value": "{\n \"type\": \"url\",\n \"settings\": {\n \"url\": \"file:/mount/backups/my_fs_backup_location\"\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -214585,6 +216445,18 @@ ] }, "description": "Restore a snapshot.\nRestore a snapshot of a cluster or data streams and indices.\n\nYou can restore a snapshot only to a running cluster with an elected master node.\nThe snapshot repository must be registered and available to the cluster.\nThe snapshot and cluster versions must be compatible.\n\nTo restore a snapshot, the cluster's global metadata must be writable. Ensure there are't any cluster blocks that prevent writes. The restore operation ignores index blocks.\n\nBefore you restore a data stream, ensure the cluster contains a matching index template with data streams enabled. To check, use the index management feature in Kibana or the get index template API:\n\n```\nGET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream\n```\n\nIf no such template exists, you can create one or restore a cluster state that contains one. Without a matching index template, a data stream can't roll over or create backing indices.\n\nIf your snapshot contains data from App Search or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot.", + "examples": { + "SnapshotRestoreRequestExample1": { + "description": "Run `POST /_snapshot/my_repository/snapshot_2/_restore?wait_for_completion=true`. It restores `index_1` and `index_2` from `snapshot_2`. The `rename_pattern` and `rename_replacement` parameters indicate any index matching the regular expression `index_(.+)` will be renamed using the pattern `restored_index_$1`. For example, `index_1` will be renamed to `restored_index_1`.\n", + "summary": "Restore with rename pattern", + "value": "{\n \"indices\": \"index_1,index_2\",\n \"ignore_unavailable\": true,\n \"include_global_state\": false,\n \"rename_pattern\": \"index_(.+)\",\n \"rename_replacement\": \"restored_index_$1\",\n \"include_aliases\": false\n}" + }, + "SnapshotRestoreRequestExample2": { + "description": "Close `index_1` then run `POST /_snapshot/my_repository/snapshot_2/_restore?wait_for_completion=true` to restore an index in-place. 
For example, you might want to perform this type of restore operation when no alternative options surface after the cluster allocation explain API reports `no_valid_shard_copy`.\n", + "summary": "Restore in-place", + "value": "{\n \"indices\": \"index_1\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -215025,6 +216897,12 @@ ] }, "description": "Clear an SQL search cursor.", + "examples": { + "ClearSqlCursorRequestExample1": { + "description": "Run `POST _sql/close` to clear an SQL search cursor.", + "value": "{\n \"cursor\": \"sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWYUpOYklQMHhRUEtld3RsNnFtYU1hQQ==:BAFmBGRhdGUBZgVsaWtlcwFzB21lc3NhZ2UBZgR1c2Vy9f///w8=\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -215674,6 +217552,12 @@ ] }, "description": "Get SQL search results.\nRun an SQL request.", + "examples": { + "QuerySqlRequestExample1": { + "description": "Run `POST _sql?format=txt` to get results for an SQL search.", + "value": "{\n \"query\": \"SELECT * FROM library ORDER BY page_count DESC LIMIT 5\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -215891,6 +217775,15 @@ ] }, "description": "Translate SQL into Elasticsearch queries.\nTranslate an SQL search into a search API request containing Query DSL.\nIt accepts the same request body parameters as the SQL search API, excluding `cursor`.", + "examples": { + "TranslateSqlRequestExample1": { + "description": "", + "method_request": "POST _sql/translate", + "summary": "sql/apis/sql-translate-api.asciidoc:12", + "type": "request", + "value": "{\n \"query\": \"SELECT * FROM library ORDER BY page_count DESC\",\n \"fetch_size\": 10\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -216839,6 +218732,15 @@ ] }, "description": "Create or update a synonym rule.\nCreate or update a synonym rule in a synonym set.\n\nIf any of the synonym rules included is invalid, the API returns an error.\n\nWhen you update a synonym rule, all analyzers using the synonyms set will be reloaded automatically to reflect the new rule.", + "examples": { + "SynonymRulePutRequestExample1": { + "description": "", + "method_request": "PUT _synonyms/my-synonyms-set/test-1", + "summary": "synonyms/apis/put-synonym-rule.asciidoc:107", + "type": "request", + "value": "{\n \"synonyms\": \"hello, hi, howdy\"\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -217590,6 +219492,18 @@ } ] }, + "examples": { + "GetTaskResponseExample1": { + "description": "A successful response from `GET _tasks?actions=cluster:*`, which retrieves all cluster-related tasks.\n", + "summary": "Get cluster actions", + "value": "{\n \"nodes\" : {\n \"oTUltX4IQMOUUVeiohTt8A\" : {\n \"name\" : \"H5dfFeA\",\n \"transport_address\" : \"127.0.0.1:9300\",\n \"host\" : \"127.0.0.1\",\n \"ip\" : \"127.0.0.1:9300\",\n \"tasks\" : {\n \"oTUltX4IQMOUUVeiohTt8A:124\" : {\n \"node\" : \"oTUltX4IQMOUUVeiohTt8A\",\n \"id\" : 124,\n \"type\" : \"direct\",\n \"action\" : \"cluster:monitor/tasks/lists[n]\",\n \"start_time_in_millis\" : 1458585884904,\n \"running_time_in_nanos\" : 47402,\n \"cancellable\" : false,\n \"parent_task_id\" : \"oTUltX4IQMOUUVeiohTt8A:123\"\n },\n \"oTUltX4IQMOUUVeiohTt8A:123\" : {\n \"node\" : \"oTUltX4IQMOUUVeiohTt8A\",\n \"id\" : 123,\n \"type\" : \"transport\",\n \"action\" : \"cluster:monitor/tasks/lists\",\n \"start_time_in_millis\" : 1458585884904,\n \"running_time_in_nanos\" : 236042,\n \"cancellable\" : false\n }\n }\n }\n }\n}" + }, + "GetTaskResponseExample2": { + "description": "A successful response from `GET 
_tasks?detailed=true&actions=*/delete/byquery`, which gets the status of a delete by query operation. The `status` object contains the actual status. `total` is the total number of operations that the delete by query expects to perform. You can estimate the progress by adding the `updated`, `created`, and `deleted` fields. The request will finish when their sum is equal to the `total` field.\n", + "summary": "Get details about a delete by query", + "value": "{\n \"nodes\" : {\n \"r1A2WoRbTwKZ516z6NEs5A\" : {\n \"name\" : \"r1A2WoR\",\n \"transport_address\" : \"127.0.0.1:9300\",\n \"host\" : \"127.0.0.1\",\n \"ip\" : \"127.0.0.1:9300\",\n \"attributes\" : {\n \"testattr\" : \"test\",\n \"portsfile\" : \"true\"\n },\n \"tasks\" : {\n \"r1A2WoRbTwKZ516z6NEs5A:36619\" : {\n \"node\" : \"r1A2WoRbTwKZ516z6NEs5A\",\n \"id\" : 36619,\n \"type\" : \"transport\",\n \"action\" : \"indices:data/write/delete/byquery\",\n \"status\" : { \n \"total\" : 6154,\n \"updated\" : 0,\n \"created\" : 0,\n \"deleted\" : 3500,\n \"batches\" : 36,\n \"version_conflicts\" : 0,\n \"noops\" : 0,\n \"retries\": 0,\n \"throttled_millis\": 0\n },\n \"description\" : \"\"\n }\n }\n }\n }\n}" + } + }, "name": { "name": "Response", "namespace": "tasks.get" @@ -217734,6 +219648,12 @@ } } }, + "examples": { + "ListTasksResponseExample1": { + "description": "A successful response from `GET _tasks?actions=*search&detailed`. The `detailed` parameter affects the `description` field, which contains human-readable text that identifies the particular request that the task is performing. For example, it helps identify the search request being performed by a search task.\n", + "value": "{\n \"nodes\" : {\n \"oTUltX4IQMOUUVeiohTt8A\" : {\n \"name\" : \"H5dfFeA\",\n \"transport_address\" : \"127.0.0.1:9300\",\n \"host\" : \"127.0.0.1\",\n \"ip\" : \"127.0.0.1:9300\",\n \"tasks\" : {\n \"oTUltX4IQMOUUVeiohTt8A:464\" : {\n \"node\" : \"oTUltX4IQMOUUVeiohTt8A\",\n \"id\" : 464,\n \"type\" : \"transport\",\n \"action\" : \"indices:data/read/search\",\n \"description\" : \"indices[test], types[test], search_type[QUERY_THEN_FETCH], source[{\\\"query\\\":...}]\",\n \"start_time_in_millis\" : 1483478610008,\n \"running_time_in_nanos\" : 13991383,\n \"cancellable\" : true,\n \"cancelled\" : false\n }\n }\n }\n }\n}" + } + }, "name": { "name": "Response", "namespace": "tasks.list" @@ -218335,6 +220255,12 @@ ] }, "description": "Find the structure of text messages.\nFind the structure of a list of text messages.\nThe messages must contain data that is suitable to be ingested into Elasticsearch.\n\nThis API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality.\nUse this API rather than the find text structure API if your input text has already been split up into separate messages by some other process.\n\nThe response from the API contains:\n\n* Sample messages.\n* Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.\n* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.\n* Appropriate mappings for an Elasticsearch index, which you could use to ingest the text.\n\nAll this information can be calculated by the structure finder with no guidance.\nHowever, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.\n\nIf the structure finder 
produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response.\nIt helps determine why the returned structure was chosen.", + "examples": { + "FindMessageStructureRequestExample1": { + "description": "Run `POST _text_structure/find_message_structure` to analyze Elasticsearch log files.\n", + "value": "{\n \"messages\": [\n \"[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128\",\n \"[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-url]\",\n \"[2024-03-05T10:52:41,042][INFO ][o.e.p.PluginsService ] [laptop] loaded module [rest-root]\",\n \"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-core]\",\n \"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-redact]\",\n \"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [ingest-user-agent]\",\n \"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-monitoring]\",\n \"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-s3]\",\n \"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-analytics]\",\n \"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-ent-search]\",\n \"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-autoscaling]\",\n \"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-painless]]\",\n \"[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-expression]\",\n \"[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-eql]\",\n \"[2024-03-05T10:52:43,291][INFO ][o.e.e.NodeEnvironment ] [laptop] heap size [16gb], compressed ordinary object pointers [true]\",\n \"[2024-03-05T10:52:46,098][INFO ][o.e.x.s.Security ] [laptop] Security is enabled\",\n \"[2024-03-05T10:52:47,227][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] Profiling is enabled\",\n \"[2024-03-05T10:52:47,259][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] profiling index templates will not be installed or reinstalled\",\n \"[2024-03-05T10:52:47,755][INFO ][o.e.i.r.RecoverySettings ] [laptop] using rate limit [40mb] with [default=40mb, read=0b, write=0b, max=0b]\",\n \"[2024-03-05T10:52:47,787][INFO ][o.e.d.DiscoveryModule ] [laptop] using discovery type [multi-node] and seed hosts providers [settings]\",\n \"[2024-03-05T10:52:49,188][INFO ][o.e.n.Node ] [laptop] initialized\",\n \"[2024-03-05T10:52:49,199][INFO ][o.e.n.Node ] [laptop] starting ...\"\n ]\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -218697,6 +220623,12 @@ } }, "description": "Find the structure of a text file.\nThe text file must contain data that is suitable to be ingested into Elasticsearch.\n\nThis API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality.\nUnlike other Elasticsearch endpoints, the data that is posted to this endpoint does not need to be UTF-8 encoded and in JSON format.\nIt must, however, be text; binary text formats are not currently supported.\nThe size is limited to the Elasticsearch HTTP receive buffer size, which defaults to 100 Mb.\n\nThe response from the API contains:\n\n* A couple of messages from the beginning of the text.\n* 
Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.\n* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.\n* Appropriate mappings for an Elasticsearch index, which you could use to ingest the text.\n\nAll this information can be calculated by the structure finder with no guidance.\nHowever, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.", + "examples": { + "FindStructureRequestExample1": { + "description": "Run `POST _text_structure/find_structure` to analyze newline-delimited JSON text.", + "value": "{\"name\": \"Leviathan Wakes\", \"author\": \"James S.A. Corey\", \"release_date\": \"2011-06-02\", \"page_count\": 561}\n{\"name\": \"Hyperion\", \"author\": \"Dan Simmons\", \"release_date\": \"1989-05-26\", \"page_count\": 482}\n{\"name\": \"Dune\", \"author\": \"Frank Herbert\", \"release_date\": \"1965-06-01\", \"page_count\": 604}\n{\"name\": \"Dune Messiah\", \"author\": \"Frank Herbert\", \"release_date\": \"1969-10-15\", \"page_count\": 331}\n{\"name\": \"Children of Dune\", \"author\": \"Frank Herbert\", \"release_date\": \"1976-04-21\", \"page_count\": 408}\n{\"name\": \"God Emperor of Dune\", \"author\": \"Frank Herbert\", \"release_date\": \"1981-05-28\", \"page_count\": 454}\n{\"name\": \"Consider Phlebas\", \"author\": \"Iain M. Banks\", \"release_date\": \"1987-04-23\", \"page_count\": 471}\n{\"name\": \"Pandora's Star\", \"author\": \"Peter F. Hamilton\", \"release_date\": \"2004-03-02\", \"page_count\": 768}\n{\"name\": \"Revelation Space\", \"author\": \"Alastair Reynolds\", \"release_date\": \"2000-03-15\", \"page_count\": 585}\n{\"name\": \"A Fire Upon the Deep\", \"author\": \"Vernor Vinge\", \"release_date\": \"1992-06-01\", \"page_count\": 613}\n{\"name\": \"Ender's Game\", \"author\": \"Orson Scott Card\", \"release_date\": \"1985-06-01\", \"page_count\": 324}\n{\"name\": \"1984\", \"author\": \"George Orwell\", \"release_date\": \"1985-06-01\", \"page_count\": 328}\n{\"name\": \"Fahrenheit 451\", \"author\": \"Ray Bradbury\", \"release_date\": \"1953-10-15\", \"page_count\": 227}\n{\"name\": \"Brave New World\", \"author\": \"Aldous Huxley\", \"release_date\": \"1932-06-01\", \"page_count\": 268}\n{\"name\": \"Foundation\", \"author\": \"Isaac Asimov\", \"release_date\": \"1951-06-01\", \"page_count\": 224}\n{\"name\": \"The Giver\", \"author\": \"Lois Lowry\", \"release_date\": \"1993-04-26\", \"page_count\": 208}\n{\"name\": \"Slaughterhouse-Five\", \"author\": \"Kurt Vonnegut\", \"release_date\": \"1969-06-01\", \"page_count\": 275}\n{\"name\": \"The Hitchhiker's Guide to the Galaxy\", \"author\": \"Douglas Adams\", \"release_date\": \"1979-10-12\", \"page_count\": 180}\n{\"name\": \"Snow Crash\", \"author\": \"Neal Stephenson\", \"release_date\": \"1992-06-01\", \"page_count\": 470}\n{\"name\": \"Neuromancer\", \"author\": \"William Gibson\", \"release_date\": \"1984-07-01\", \"page_count\": 271}\n{\"name\": \"The Handmaid's Tale\", \"author\": \"Margaret Atwood\", \"release_date\": \"1985-06-01\", \"page_count\": 311}\n{\"name\": \"Starship Troopers\", \"author\": \"Robert A. Heinlein\", \"release_date\": \"1959-12-01\", \"page_count\": 335}\n{\"name\": \"The Left Hand of Darkness\", \"author\": \"Ursula K. 
Le Guin\", \"release_date\": \"1969-06-01\", \"page_count\": 304}\n{\"name\": \"The Moon is a Harsh Mistress\", \"author\": \"Robert A. Heinlein\", \"release_date\": \"1966-04-01\", \"page_count\": 288}" + } + }, "generics": [ { "name": "TJsonDocument", @@ -219315,6 +221247,12 @@ ] }, "description": "Test a Grok pattern.\nTest a Grok pattern on one or more lines of text.\nThe API indicates whether the lines match the pattern together with the offsets and lengths of the matched substrings.", + "examples": { + "TestGrokPatternRequestExample1": { + "description": "Run `GET _text_structure/test_grok_pattern` to test a Grok pattern.", + "value": "{\n \"grok_pattern\": \"Hello %{WORD:first_name} %{WORD:last_name}\",\n \"text\": [\n \"Hello John Doe\",\n \"this does not match\"\n ]\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -221059,6 +222997,12 @@ ] }, "description": "Preview a transform.\nGenerates a preview of the results that you will get when you create a transform with the same configuration.\n\nIt returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also\ngenerates a list of mappings and settings for the destination index. These values are determined based on the field\ntypes of the source index and the transform aggregations.", + "examples": { + "PreviewTransformRequestExample1": { + "description": "Run `POST _transform/_preview` to preview a transform that uses the pivot method.", + "value": "{\n \"source\": {\n \"index\": \"kibana_sample_data_ecommerce\"\n },\n \"pivot\": {\n \"group_by\": {\n \"customer_id\": {\n \"terms\": {\n \"field\": \"customer_id\",\n \"missing_bucket\": true\n }\n }\n },\n \"aggregations\": {\n \"max_price\": {\n \"max\": {\n \"field\": \"taxful_total_price\"\n }\n }\n }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -221276,6 +223220,18 @@ ] }, "description": "Create a transform.\nCreates a transform.\n\nA transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. You can also think of the destination index as a two-dimensional tabular data structure (known as\na data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a\nunique row per entity.\n\nYou must choose either the latest or pivot method for your transform; you cannot use both in a single transform. If\nyou choose to use the pivot method for your transform, the entities are defined by the set of `group_by` fields in\nthe pivot object. If you choose to use the latest method, the entities are defined by the `unique_key` field values\nin the latest object.\n\nYou must have `create_index`, `index`, and `read` privileges on the destination index and `read` and\n`view_index_metadata` privileges on the source indices. When Elasticsearch security features are enabled, the\ntransform remembers which roles the user that created it had at the time of creation and uses those same roles. If\nthose roles do not have the required privileges on the source and destination indices, the transform fails when it\nattempts unauthorized operations.\n\nNOTE: You must use Kibana or this API to create a transform. Do not add a transform directly into any\n`.transform-internal*` indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do\nnot give users any privileges on `.transform-internal*` indices. 
If you used transforms prior to 7.5, also do not\ngive users any privileges on `.data-frame-internal*` indices.", + "examples": { + "PutTransformRequestExample1": { + "description": "Run `PUT _transform/ecommerce_transform1` to create a transform that uses the pivot method.", + "summary": "A pivot transform", + "value": "{\n \"source\": {\n \"index\": \"kibana_sample_data_ecommerce\",\n \"query\": {\n \"term\": {\n \"geoip.continent_name\": {\n \"value\": \"Asia\"\n }\n }\n }\n },\n \"pivot\": {\n \"group_by\": {\n \"customer_id\": {\n \"terms\": {\n \"field\": \"customer_id\",\n \"missing_bucket\": true\n }\n }\n },\n \"aggregations\": {\n \"max_price\": {\n \"max\": {\n \"field\": \"taxful_total_price\"\n }\n }\n }\n },\n \"description\": \"Maximum priced ecommerce data by customer_id in Asia\",\n \"dest\": {\n \"index\": \"kibana_sample_data_ecommerce_transform1\",\n \"pipeline\": \"add_timestamp_pipeline\"\n },\n \"frequency\": \"5m\",\n \"sync\": {\n \"time\": {\n \"field\": \"order_date\",\n \"delay\": \"60s\"\n }\n },\n \"retention_policy\": {\n \"time\": {\n \"field\": \"order_date\",\n \"max_age\": \"30d\"\n }\n }\n}" + }, + "PutTransformRequestExample2": { + "description": "Run `PUT _transform/ecommerce_transform2` to create a transform that uses the latest method.", + "summary": "A latest transform", + "value": "{\n \"source\": {\n \"index\": \"kibana_sample_data_ecommerce\"\n },\n \"latest\": {\n \"unique_key\": [\n \"customer_id\"\n ],\n \"sort\": \"order_date\"\n },\n \"description\": \"Latest order for each customer\",\n \"dest\": {\n \"index\": \"kibana_sample_data_ecommerce_transform2\"\n },\n \"frequency\": \"5m\",\n \"sync\": {\n \"time\": {\n \"field\": \"order_date\",\n \"delay\": \"60s\"\n }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -221817,6 +223773,12 @@ ] }, "description": "Update a transform.\nUpdates certain properties of a transform.\n\nAll updated properties except `description` do not take effect until after the transform starts the next checkpoint,\nthus there is data consistency in each checkpoint. To use this API, you must have `read` and `view_index_metadata`\nprivileges for the source indices. You must also have `index` and `read` privileges for the destination index. 
When\nElasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the\ntime of update and runs with those privileges.", + "examples": { + "UpdateTransformRequestExample1": { + "description": "Run `POST _transform/simple-kibana-ecomm-pivot/_update` to update a transform that uses the pivot method.", + "value": "{\n \"source\": {\n \"index\": \"kibana_sample_data_ecommerce\",\n \"query\": {\n \"term\": {\n \"geoip.continent_name\": {\n \"value\": \"Asia\"\n }\n }\n }\n },\n \"pivot\": {\n \"group_by\": {\n \"customer_id\": {\n \"terms\": {\n \"field\": \"customer_id\",\n \"missing_bucket\": true\n }\n }\n },\n \"aggregations\": {\n \"max_price\": {\n \"max\": {\n \"field\": \"taxful_total_price\"\n }\n }\n }\n },\n \"description\": \"Maximum priced ecommerce data by customer_id in Asia\",\n \"dest\": {\n \"index\": \"kibana_sample_data_ecommerce_transform1\",\n \"pipeline\": \"add_timestamp_pipeline\"\n },\n \"frequency\": \"5m\",\n \"sync\": {\n \"time\": {\n \"field\": \"order_date\",\n \"delay\": \"60s\"\n }\n },\n \"retention_policy\": {\n \"time\": {\n \"field\": \"order_date\",\n \"max_age\": \"30d\"\n }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -226979,6 +228941,23 @@ ] }, "description": "Run a watch.\nThis API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes.\n\nFor testing and debugging purposes, you also have fine-grained control on how the watch runs.\nYou can run the watch without running all of its actions or alternatively by simulating them.\nYou can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs.\n\nYou can use the run watch API to run watches that are not yet registered by specifying the watch definition inline.\nThis serves as great tool for testing and debugging your watches prior to adding them to Watcher.\n\nWhen Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches.\nIf your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch.\n\nWhen using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information who stored the watch.", + "examples": { + "WatcherExecuteRequestExample1": { + "description": "Run `POST _watcher/watch/my_watch/_execute` to run a watch. The input defined in the watch is ignored and the `alternative_input` is used as the payload. The condition as defined by the watch is ignored and is assumed to evaluate to true. The `force_simulate` action forces the simulation of `my-action`. 
Forcing the simulation means that throttling is ignored and the watch is simulated by Watcher instead of being run normally.\n", + "summary": "Run a watch", + "value": "{\n \"trigger_data\" : { \n \"triggered_time\" : \"now\",\n \"scheduled_time\" : \"now\"\n },\n \"alternative_input\" : { \n \"foo\" : \"bar\"\n },\n \"ignore_condition\" : true, \n \"action_modes\" : {\n \"my-action\" : \"force_simulate\" \n },\n \"record_execution\" : true \n}" + }, + "WatcherExecuteRequestExample2": { + "description": "Run `POST _watcher/watch/my_watch/_execute` and set a different mode for each action.\n", + "summary": "Run a watch with multiple action modes", + "value": "{\n \"action_modes\" : {\n \"action1\" : \"force_simulate\",\n \"action2\" : \"skip\"\n }\n}" + }, + "WatcherExecuteRequestExample3": { + "description": "Run `POST _watcher/watch/_execute` to run a watch inline. All other settings for this API still apply when inlining a watch. In this example, while the inline watch defines a compare condition, during the execution this condition will be ignored.\n", + "summary": "Run a watch inline", + "value": "{\n \"watch\" : {\n \"trigger\" : { \"schedule\" : { \"interval\" : \"10s\" } },\n \"input\" : {\n \"search\" : {\n \"request\" : {\n \"indices\" : [ \"logs\" ],\n \"body\" : {\n \"query\" : {\n \"match\" : { \"message\": \"error\" }\n }\n }\n }\n }\n },\n \"condition\" : {\n \"compare\" : { \"ctx.payload.hits.total\" : { \"gt\" : 0 }}\n },\n \"actions\" : {\n \"log_error\" : {\n \"logging\" : {\n \"text\" : \"Found {{ctx.payload.hits.total}} errors in the logs\"\n }\n }\n }\n }\n}" + } + }, "inherits": { "type": { "name": "RequestBase", @@ -227245,6 +229224,12 @@ } ] }, + "examples": { + "WatcherGetSettingsResponseExample1": { + "description": "A successful response with two index settings.", + "value": "{\n \"index\": {\n \"auto_expand_replicas\": \"0-4\",\n \"number_of_replicas\": 0\n }\n}" + } + }, "name": { "name": "Response", "namespace": "watcher.get_settings" @@ -227504,6 +229489,12 @@ ] }, "description": "Create or update a watch.\nWhen a watch is registered, a new document that represents the watch is added to the `.watches` index and its trigger is immediately registered with the relevant trigger engine.\nTypically for the `schedule` trigger, the scheduler is the trigger engine.\n\nIMPORTANT: You must use Kibana or this API to create a watch.\nDo not add a watch directly to the `.watches` index by using the Elasticsearch index API.\nIf Elasticsearch security features are enabled, do not give users write privileges on the `.watches` index.\n\nWhen you add a watch you can also define its initial active state by setting the *active* parameter.\n\nWhen Elasticsearch security features are enabled, your watch can index or search only on indices for which the user that stored the watch has privileges.\nIf the user is able to read index `a`, but not index `b`, the same will apply when the watch runs.", + "examples": { + "WatcherPutWatchRequestExample1": { + "description": "Run `PUT _watcher/watch/my-watch` add a watch. The watch schedule triggers every minute. The watch search input looks for any 404 HTTP responses that occurred in the last five minutes. The watch condition checks if any search hits where found. 
When found, the watch action sends an email to an administrator.\n",
+     "value": "{\n \"trigger\" : {\n \"schedule\" : { \"cron\" : \"0 0/1 * * * ?\" }\n },\n \"input\" : {\n \"search\" : {\n \"request\" : {\n \"indices\" : [\n \"logstash*\"\n ],\n \"body\" : {\n \"query\" : {\n \"bool\" : {\n \"must\" : {\n \"match\": {\n \"response\": 404\n }\n },\n \"filter\" : {\n \"range\": {\n \"@timestamp\": {\n \"from\": \"{{ctx.trigger.scheduled_time}}||-5m\",\n \"to\": \"{{ctx.trigger.triggered_time}}\"\n }\n }\n }\n }\n }\n }\n }\n }\n },\n \"condition\" : {\n \"compare\" : { \"ctx.payload.hits.total\" : { \"gt\" : 0 }}\n },\n \"actions\" : {\n \"email_admin\" : {\n \"email\" : {\n \"to\" : \"admin@domain.host.com\",\n \"subject\" : \"404 recently encountered\"\n }\n }\n }\n}"
+    }
+   },
   "inherits": {
    "type": {
     "name": "RequestBase",
@@ -228308,6 +230299,11 @@
     ]
    },
    "description": "Update Watcher index settings.\nUpdate settings for the Watcher internal index (`.watches`).\nOnly a subset of settings can be modified.\nThis includes `index.auto_expand_replicas` and `index.number_of_replicas`.",
+   "examples": {
+    "WatcherUpdateSettingsRequestExample1": {
+     "value": "{\n \"index.auto_expand_replicas\": \"0-4\"\n}"
+    }
+   },
   "inherits": {
    "type": {
     "name": "RequestBase",
diff --git a/src/scripts/.gitignore b/src/scripts/.gitignore
new file mode 100644
index 0000000000..749ccdafd4
--- /dev/null
+++ b/src/scripts/.gitignore
@@ -0,0 +1,4 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
diff --git a/src/scripts/examples-stats/EndpointInfoGenerator.py b/src/scripts/examples-stats/EndpointInfoGenerator.py
new file mode 100644
index 0000000000..272fdad03b
--- /dev/null
+++ b/src/scripts/examples-stats/EndpointInfoGenerator.py
@@ -0,0 +1,95 @@
+import os
+import re
+from typing import List, Optional
+from constants import EXAMPLES_FOLDER
+from dataclasses import dataclass, field
+
+@dataclass
+class EndpointInfo:
+    path: Optional[str] = None
+    has_examples_subfolder: bool = False
+    num_request_examples: int = 0
+    num_response_examples: int = 0
+    num_examples: int = 0
+    examples_subfolders: List[str] = field(default_factory=list)
+    recognized_examples_subfolders: set[str] = field(default_factory=set)
+    examples_response_codes: set[str] = field(default_factory=set)
+
+class EndpointInfoGenerator:
+    def __init__(self):
+        self.spec_path = "."
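+
+    # Assumed on-disk layout, inferred from the folder names this class
+    # looks for below (not documented elsewhere in this change):
+    #   <endpoint>/examples/request/*.yaml         - request examples
+    #   <endpoint>/examples/response/*.yaml        - 200 response examples
+    #   <endpoint>/examples/<nnn>_response/*.yaml  - examples for HTTP status <nnn>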
+
+    # Get all the folders in a path
+    def get_folders(self, path: str) -> List[str]:
+        return [f for f in os.listdir(path) if os.path.isdir(os.path.join(path, f))]
+
+    def get_files(self, path: str) -> List[str]:
+        return [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
+
+    def is_example_file(self, file: str) -> bool:
+        if file.endswith(".yaml"):
+            return True
+        print(f"WARNING: Found non-YAML example file: {file}")
+        return False
+
+    def get_example_files_in_folder(self, path: str) -> List[str]:
+        example_files = []
+        for file in self.get_files(path):
+            if self.is_example_file(file):
+                example_files.append(file)
+        return example_files
+
+    def get_request_subfolder(self, endpoint_path: str) -> Optional[str]:
+        examples_path = os.path.join(endpoint_path, EXAMPLES_FOLDER)
+        request_examples_path = os.path.join(examples_path, "request")
+        if os.path.exists(request_examples_path):
+            return "request"
+        return None
+
+    def get_response_subfolders(self, endpoint_path: str) -> List[str]:
+        examples_path = os.path.join(endpoint_path, EXAMPLES_FOLDER)
+        response_examples_folders = []
+        response_examples_path = os.path.join(examples_path, "response")
+        if os.path.exists(response_examples_path):
+            response_examples_folders.append("response")
+        else:
+            examples_subfolders = self.get_folders(examples_path)
+            for examples_subfolder in examples_subfolders:
+                # Look for folders of the pattern "nnn_response"
+                if re.match(r"[0-9]{3}_response", examples_subfolder):
+                    response_examples_folders.append(examples_subfolder)
+        return response_examples_folders
+
+    def get_response_code_from_response_folder(self, folder: str) -> str:
+        if folder == "response":
+            return "200"
+        match = re.match(r"(\d{3})_response", folder)
+        if match:
+            return match.group(1)
+        raise Exception(f"Invalid response folder: {folder}")
+
+    def get_endpoint_info(self, endpoint_path: str) -> EndpointInfo:
+        endpoint_path_relative_to_spec = os.path.relpath(endpoint_path, self.spec_path)
+        endpoint_info = EndpointInfo(path=endpoint_path_relative_to_spec)
+        # If there is no 'examples' folder, return EndpointInfo with
+        # default values
+        examples_path = os.path.join(endpoint_path, EXAMPLES_FOLDER)
+        if not os.path.exists(examples_path):
+            return endpoint_info
+        endpoint_info.examples_subfolders = self.get_folders(examples_path)
+        request_subfolder = self.get_request_subfolder(endpoint_path)
+        if request_subfolder:
+            endpoint_info.recognized_examples_subfolders.add(request_subfolder)
+            examples_request_path = os.path.join(examples_path, request_subfolder)
+            endpoint_info.num_request_examples = len(self.get_example_files_in_folder(examples_request_path))
+        response_subfolders = self.get_response_subfolders(endpoint_path)
+        if response_subfolders:
+            endpoint_info.recognized_examples_subfolders.update(response_subfolders)
+            for response_subfolder in response_subfolders:
+                response_code = self.get_response_code_from_response_folder(response_subfolder)
+                endpoint_info.examples_response_codes.add(response_code)
+                examples_response_path = os.path.join(examples_path, response_subfolder)
+                endpoint_info.num_response_examples += len(self.get_example_files_in_folder(examples_response_path))
+        endpoint_info.num_examples = endpoint_info.num_request_examples + endpoint_info.num_response_examples
+        return endpoint_info
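For reference, the `response`/`<nnn>_response` folder convention encoded above can be exercised in isolation. A minimal standalone sketch that mirrors `get_response_code_from_response_folder` (the function name here is illustrative, not part of the script):

```python
import re

def response_code_for_folder(folder: str) -> str:
    # A bare "response" folder holds examples for the default 200 response;
    # "<nnn>_response" folders map to HTTP status code nnn.
    if folder == "response":
        return "200"
    match = re.match(r"(\d{3})_response", folder)
    if match:
        return match.group(1)
    raise ValueError(f"Invalid response folder: {folder}")

assert response_code_for_folder("response") == "200"
assert response_code_for_folder("404_response") == "404"
```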
diff --git a/src/scripts/examples-stats/EndpointPathsFinder.py b/src/scripts/examples-stats/EndpointPathsFinder.py
new file mode 100644
index 0000000000..1be0fbb096
--- /dev/null
+++ b/src/scripts/examples-stats/EndpointPathsFinder.py
@@ -0,0 +1,45 @@
+import os
+from typing import List
+
+class EndpointPathsFinder:
+    def __init__(self):
+        self.spec_path = "."
+
+    def get_folders(self, path: str) -> List[str]:
+        return [f for f in os.listdir(path) if os.path.isdir(os.path.join(path, f))]
+
+    def is_endpoint_group_folder(self, folder: str) -> bool:
+        # Other than _global, any folder starting with underscore is
+        # not an endpoint group folder
+        if folder == "_global":
+            return True
+        if folder.startswith("_"):
+            return False
+        return True
+
+    def get_endpoint_group_folders(self) -> List[str]:
+        folders = self.get_folders(self.spec_path)
+        return [f for f in folders if self.is_endpoint_group_folder(f)]
+
+    def is_endpoint_folder(self, folder: str) -> bool:
+        if folder == "_types":
+            return False
+        return True
+
+    def get_endpoint_folders(self, path: str) -> List[str]:
+        folders = self.get_folders(path)
+        return [f for f in folders if self.is_endpoint_folder(f)]
+
+    def get_endpoint_paths(self) -> List[str]:
+        endpoint_paths = []
+        group_folders = self.get_endpoint_group_folders()
+        for group_folder in group_folders:
+            group_path = os.path.join(self.spec_path, group_folder)
+            group_endpoint_folders = self.get_endpoint_folders(group_path)
+            for endpoint_folder in group_endpoint_folders:
+                endpoint_paths.append(os.path.join(group_path, endpoint_folder))
+        return endpoint_paths
+
+    def find_paths(self) -> List[str]:
+        return self.get_endpoint_paths()
\ No newline at end of file
diff --git a/src/scripts/examples-stats/ExamplesInfoGenerator.py b/src/scripts/examples-stats/ExamplesInfoGenerator.py
new file mode 100644
index 0000000000..a020928f96
--- /dev/null
+++ b/src/scripts/examples-stats/ExamplesInfoGenerator.py
@@ -0,0 +1,31 @@
+from typing import List
+from dataclasses import dataclass
+from EndpointPathsFinder import EndpointPathsFinder
+from EndpointInfoGenerator import EndpointInfo, EndpointInfoGenerator
+from ExamplesStatsGenerator import ExampleStats, ExamplesStatsGenerator
+
+@dataclass
+class ExamplesInfo:
+    endpoints_info: List[EndpointInfo]
+    stats: ExampleStats
+
+class ExamplesInfoGenerator:
+    def __init__(self):
+        self.endpoint_paths_finder = EndpointPathsFinder()
+        self.endpoint_info_processor = EndpointInfoGenerator()
+
+    def get_examples_info(self) -> ExamplesInfo:
+        endpoint_paths = self.endpoint_paths_finder.find_paths()
+        endpoint_info_list = []
+        for endpoint_path in endpoint_paths:
+            endpoint_info = self.endpoint_info_processor.get_endpoint_info(endpoint_path)
+            endpoint_info_list.append(endpoint_info)
+
+        examples_stats_generator = ExamplesStatsGenerator(endpoint_info_list)
+        stats = examples_stats_generator.get_stats()
+
+        examples_info = ExamplesInfo(endpoint_info_list, stats)
+        return examples_info
\ No newline at end of file
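The stats generator that follows buckets each endpoint into exactly one of four mutually exclusive categories based on its example counts. A minimal sketch of that classification (function and label names are illustrative):

```python
def classify_endpoint(num_request_examples: int, num_response_examples: int) -> str:
    # Mirrors the if/elif/else chain in ExamplesStatsGenerator.get_stats.
    if num_request_examples > 0 and num_response_examples == 0:
        return "only_request"
    if num_request_examples == 0 and num_response_examples > 0:
        return "only_response"
    if num_request_examples > 0 and num_response_examples > 0:
        return "both"
    return "none"

assert classify_endpoint(2, 0) == "only_request"
assert classify_endpoint(0, 1) == "only_response"
assert classify_endpoint(1, 3) == "both"
assert classify_endpoint(0, 0) == "none"
```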
diff --git a/src/scripts/examples-stats/ExamplesStatsGenerator.py b/src/scripts/examples-stats/ExamplesStatsGenerator.py
new file mode 100644
index 0000000000..e2bf5361d0
--- /dev/null
+++ b/src/scripts/examples-stats/ExamplesStatsGenerator.py
@@ -0,0 +1,49 @@
+from dataclasses import dataclass
+from typing import List
+from EndpointInfoGenerator import EndpointInfo
+
+@dataclass
+class ExampleStats:
+    num_endpoints: int = 0
+    num_endpoints_with_examples: int = 0
+    num_endpoints_with_no_examples: int = 0
+    num_endpoints_with_only_request_examples: int = 0
+    num_endpoints_with_only_response_examples: int = 0
+    num_endpoints_with_both_request_and_response_examples: int = 0
+    num_endpoints_with_non_200_response_code_examples: int = 0
+    num_request_examples: int = 0
+    num_response_examples: int = 0
+    max_examples_per_endpoint: int = 0
+
+class ExamplesStatsGenerator:
+    def __init__(self, endpoint_info_list: List[EndpointInfo]):
+        self.endpoint_info_list = endpoint_info_list
+
+    def get_stats(self) -> ExampleStats:
+        stats = ExampleStats()
+        for endpoint_info in self.endpoint_info_list:
+            stats.num_endpoints += 1
+
+            if endpoint_info.num_examples > 0:
+                stats.num_endpoints_with_examples += 1
+
+            if endpoint_info.num_examples > stats.max_examples_per_endpoint:
+                stats.max_examples_per_endpoint = endpoint_info.num_examples
+
+            if endpoint_info.num_request_examples > 0 and endpoint_info.num_response_examples == 0:
+                stats.num_endpoints_with_only_request_examples += 1
+            elif endpoint_info.num_request_examples == 0 and endpoint_info.num_response_examples > 0:
+                stats.num_endpoints_with_only_response_examples += 1
+            elif endpoint_info.num_request_examples > 0 and endpoint_info.num_response_examples > 0:
+                stats.num_endpoints_with_both_request_and_response_examples += 1
+            else:
+                stats.num_endpoints_with_no_examples += 1
+
+            stats.num_request_examples += endpoint_info.num_request_examples
+            stats.num_response_examples += endpoint_info.num_response_examples
+
+            non_200_response_codes = endpoint_info.examples_response_codes - {"200"}
+            if non_200_response_codes:
+                stats.num_endpoints_with_non_200_response_code_examples += 1
+        return stats
\ No newline at end of file
diff --git a/src/scripts/examples-stats/constants.py b/src/scripts/examples-stats/constants.py
new file mode 100644
index 0000000000..c6ae097843
--- /dev/null
+++ b/src/scripts/examples-stats/constants.py
@@ -0,0 +1,2 @@
+DEFAULT_SPEC_PATH = "../../../specification"
+EXAMPLES_FOLDER = "examples"
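The driver script below changes into the hard-coded `DEFAULT_SPEC_PATH` and notes in a comment that a command-line option would be preferable. A hypothetical `argparse`-based extension, not part of this change, might look like:

```python
import argparse

from constants import DEFAULT_SPEC_PATH

def parse_args() -> argparse.Namespace:
    # Hypothetical: lets callers point the script at any checkout of the
    # specification instead of relying on the relative default path.
    parser = argparse.ArgumentParser(description="Report example coverage per endpoint.")
    parser.add_argument("--spec-path", default=DEFAULT_SPEC_PATH,
                        help="Path to the specification folder.")
    return parser.parse_args()
```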
diff --git a/src/scripts/examples-stats/main.py b/src/scripts/examples-stats/main.py
new file mode 100755
index 0000000000..324a90edfb
--- /dev/null
+++ b/src/scripts/examples-stats/main.py
@@ -0,0 +1,56 @@
+#!/usr/bin/python3
+
+import os
+from constants import DEFAULT_SPEC_PATH
+from ExamplesInfoGenerator import ExamplesInfoGenerator, ExamplesInfo, ExampleStats
+
+def print_stats(stats: ExampleStats):
+    print("===============")
+    print("==== Stats ====")
+    print("===============")
+    print(f"Endpoints: {stats.num_endpoints}")
+    print(f"Endpoints with no request or response examples: {stats.num_endpoints_with_no_examples}")
+    print(f"Endpoints with examples: {stats.num_endpoints_with_examples}")
+    print(f"  {stats.num_endpoints_with_only_request_examples:>4}: Only request examples")
+    print(f"  {stats.num_endpoints_with_only_response_examples:>4}: Only response examples")
+    print(f"  {stats.num_endpoints_with_both_request_and_response_examples:>4}: Both request and response examples")
+    print(f"Endpoints with non-200 response code examples: {stats.num_endpoints_with_non_200_response_code_examples}")
+    print("------------------------")
+    print(f"Examples: {stats.num_request_examples + stats.num_response_examples}")
+    print(f"  {stats.num_request_examples:>4}: Request examples")
+    print(f"  {stats.num_response_examples:>4}: Response examples")
+    print(f"Max examples per endpoint: {stats.max_examples_per_endpoint}")
+    print("===============\n")
+
+def main():
+    # Using a default spec path. We should add an option
+    # for getting the spec path as a command line argument
+    os.chdir(DEFAULT_SPEC_PATH)
+    examples_info_generator = ExamplesInfoGenerator()
+    examples_info: ExamplesInfo = examples_info_generator.get_examples_info()
+    # === print stats
+    print_stats(examples_info.stats)
+    # === print paths with max examples
+    print("Paths with max examples:")
+    for endpoint_info in examples_info.endpoints_info:
+        if endpoint_info.num_examples == examples_info.stats.max_examples_per_endpoint:
+            print(f"  {endpoint_info.path} with {endpoint_info.num_examples} examples")
+    print()
+    # === print all recognized examples subfolders
+    all_examples_subfolders = set()
+    all_recognized_examples_subfolders = set()
+    for endpoint_info in examples_info.endpoints_info:
+        all_examples_subfolders.update(endpoint_info.examples_subfolders)
+        all_recognized_examples_subfolders.update(endpoint_info.recognized_examples_subfolders)
+    print("All recognized subfolders of 'examples' folder:")
+    for folder in all_recognized_examples_subfolders:
+        print(f"  {folder}")
+    print()
+    # === print unrecognized examples subfolders
+    unrecognized_examples_subfolders = all_examples_subfolders - all_recognized_examples_subfolders
+    print("Unrecognized subfolders of 'examples' folder:")
+    for folder in unrecognized_examples_subfolders:
+        print(f"  {folder}")
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/src/scripts/openapi-analyzer/DetailedStatsGenerator.py b/src/scripts/openapi-analyzer/DetailedStatsGenerator.py
new file mode 100644
index 0000000000..6b7e3d78d1
--- /dev/null
+++ b/src/scripts/openapi-analyzer/DetailedStatsGenerator.py
@@ -0,0 +1,115 @@
+import os
+from dataclasses import dataclass, field
+from EndpointInfo import EndpointInfo
+from constants import ENDPOINT_OPERATIONS
+from openapi_pydantic.v3.v3_0 import OpenAPI, Reference
+
+
+@dataclass
+class RequestStats:
+    num_operations: int = 0
+    num_with_body: int = 0
+    num_with_examples: int = 0
+    num_examples: int = 0
+    content_types: set[str] = field(default_factory=set)
+
+@dataclass
+class ResponseCodeStats:
+    num_operations: int = 0
+    num_with_body: int = 0
+    num_with_examples: int = 0
+    num_examples: int = 0
+    content_types: set[str] = field(default_factory=set)
+
+@dataclass
+class ResponseStats:
+    num_operations: int = 0
+
+@dataclass
+class OperationStats:
+    request_stats: RequestStats = field(default_factory=RequestStats)
+    response_stats: dict[str, ResponseCodeStats] = field(default_factory=dict)
+
+@dataclass
+class DetailedStats:
+    num_endpoints: int = 0
+    num_operations: int = 0
+    operation_stats: dict[str, OperationStats] = field(default_factory=dict)
+
+
+class DetailedStatsGenerator:
+    def __init__(self, openapi_spec: OpenAPI):
+        self.openapi_spec = openapi_spec
+
+    def get_endpoint_info_list(self) -> list[EndpointInfo]:
+        endpoint_info_list = []
+        for path, path_item in self.openapi_spec.paths.items():
+            endpoint_info = EndpointInfo.from_path(path, path_item)
+            endpoint_info_list.append(endpoint_info)
+        return endpoint_info_list
+
+    def get_detailed_stats(self) -> DetailedStats:
+        endpoint_info_list = self.get_endpoint_info_list()
+        stats = DetailedStats()
+        stats.num_endpoints = len(endpoint_info_list)
+        for operation in ENDPOINT_OPERATIONS:
+            for endpoint_info in endpoint_info_list:
+                if operation in endpoint_info.operations:
+                    stats.num_operations += 1
+                    if operation not in stats.operation_stats:
+                        stats.operation_stats[operation] = OperationStats()
+                    operation_stats = stats.operation_stats[operation]
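+                    # Count the operation, then tally its request body and,
+                    # further down, each response code; bodies and responses
+                    # given as $ref pointers are resolved through components.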
operation_stats.request_stats.num_operations += 1 + if endpoint_info.operations[operation].requestBody: + requestBody = endpoint_info.operations[operation].requestBody + if isinstance(requestBody, Reference): + component_request_ref = os.path.basename(requestBody.ref) + requestBody = self.openapi_spec.components.requestBodies[component_request_ref] + for content_type, media_type in requestBody.content.items(): + operation_stats.request_stats.content_types.add(content_type) + if media_type.examples: + operation_stats.request_stats.num_with_examples += 1 + operation_stats.request_stats.num_examples += len(media_type.examples) + operation_stats.request_stats.num_with_body += 1 + if endpoint_info.operations[operation].responses: + for response_code, response in endpoint_info.operations[operation].responses.items(): + if response_code not in operation_stats.response_stats: + operation_stats.response_stats[response_code] = ResponseCodeStats() + operation_stats.response_stats[response_code].num_operations += 1 + if isinstance(response, Reference): + component_response_ref = os.path.basename(response.ref) + response = self.openapi_spec.components.responses[component_response_ref] + if response.content: + for content_type, media_type in response.content.items(): + operation_stats.response_stats[response_code].content_types.add(content_type) + if media_type.examples: + operation_stats.response_stats[response_code].num_with_examples += 1 + operation_stats.response_stats[response_code].num_examples += len(media_type.examples) + operation_stats.response_stats[response_code].num_with_body += 1 + return stats + + def print_detailed_stats(self, stats: DetailedStats): + print("========================") + print("==== Detailed Stats ====") + print("========================") + print(f"Number of endpoints: {stats.num_endpoints}") + print(f"Number of operations: {stats.num_operations}") + for operation, operation_stats in stats.operation_stats.items(): + print(f" {operation}: {operation_stats.request_stats.num_operations}") + print(" Requests:") + print(f" with body: {operation_stats.request_stats.num_with_body}") + print(" Content types:") + for content_type in operation_stats.request_stats.content_types: + print(f" {content_type}") + print(f" Has examples: {operation_stats.request_stats.num_with_examples}") + print(f" Number of examples: {operation_stats.request_stats.num_examples}") + for response_code, response_code_stats in operation_stats.response_stats.items(): + print(" Responses:") + print(f" '{response_code}': {response_code_stats.num_operations}") + print(f" Has content: {response_code_stats.num_with_body}") + print(" Content types:") + for content_type in response_code_stats.content_types: + print(f" {content_type}") + print(f" Has examples: {response_code_stats.num_with_examples}") + print(f" Number of examples: {response_code_stats.num_examples}") + print() diff --git a/src/scripts/openapi-analyzer/EndpointInfo.py b/src/scripts/openapi-analyzer/EndpointInfo.py new file mode 100644 index 0000000000..1e4f1f720c --- /dev/null +++ b/src/scripts/openapi-analyzer/EndpointInfo.py @@ -0,0 +1,24 @@ +from openapi_pydantic.v3.v3_0 import PathItem +from constants import ENDPOINT_OPERATIONS + +class EndpointInfo: + def __init__(self, path: str): + self.path = path + self.operations = {} + self.summary = None + self.description = None + self.parameters = [] + + def init(self, path_item: PathItem): + self.summary = path_item.summary + self.description = path_item.description + self.parameters = 
path_item.parameters + for operation in ENDPOINT_OPERATIONS: + if getattr(path_item, operation): + self.operations[operation] = getattr(path_item, operation) + + @staticmethod + def from_path(path: str, path_item: PathItem): + endpoint_info = EndpointInfo(path) + endpoint_info.init(path_item) + return endpoint_info \ No newline at end of file diff --git a/src/scripts/openapi-analyzer/OpenapiAnalyzer.py b/src/scripts/openapi-analyzer/OpenapiAnalyzer.py new file mode 100644 index 0000000000..b808618905 --- /dev/null +++ b/src/scripts/openapi-analyzer/OpenapiAnalyzer.py @@ -0,0 +1,20 @@ +from DetailedStatsGenerator import DetailedStatsGenerator +from SummaryStatsGenerator import SummaryStatsGenerator +from openapi_pydantic.v3.v3_0 import OpenAPI + + +class OpenapiAnalyzer: + def __init__(self, openapi_filepath): + self.openapi_filepath = openapi_filepath + + def run(self): + openapi_spec = OpenAPI.parse_file(self.openapi_filepath) + print(f"OpenAPI version: {openapi_spec.openapi}\n") + detailed_stats_generator = DetailedStatsGenerator(openapi_spec) + detailed_stats = detailed_stats_generator.get_detailed_stats() + summary_stats_generator = SummaryStatsGenerator(detailed_stats) + summary_stats = summary_stats_generator.get_summary_stats() + summary_stats_generator.print_summary_stats(summary_stats) + detailed_stats_generator.print_detailed_stats(detailed_stats) + + diff --git a/src/scripts/openapi-analyzer/SummaryStatsGenerator.py b/src/scripts/openapi-analyzer/SummaryStatsGenerator.py new file mode 100644 index 0000000000..13ec17bd99 --- /dev/null +++ b/src/scripts/openapi-analyzer/SummaryStatsGenerator.py @@ -0,0 +1,60 @@ +from dataclasses import dataclass, field +from DetailedStatsGenerator import DetailedStats + + +@dataclass +class SummaryStats: + num_endpoints: int = 0 + num_operations: int = 0 + num_requests_with_body: int = 0 + num_requests_with_examples: int = 0 + num_request_examples: int = 0 + num_responses_with_body: int = 0 + num_responses_with_examples: int = 0 + num_response_examples: int = 0 + num_examples: int = 0 + request_content_types: set[str] = field(default_factory=set) + response_content_types: set[str] = field(default_factory=set) + + +class SummaryStatsGenerator: + def __init__(self, detailed_stats: DetailedStats): + self.detailed_stats = detailed_stats + + def get_summary_stats(self) -> SummaryStats: + summary_stats = SummaryStats() + summary_stats.num_endpoints = self.detailed_stats.num_endpoints + summary_stats.num_operations = self.detailed_stats.num_operations + for _operation, operation_stats in self.detailed_stats.operation_stats.items(): + summary_stats.num_requests_with_body += operation_stats.request_stats.num_with_body + summary_stats.num_requests_with_examples += operation_stats.request_stats.num_with_examples + summary_stats.num_request_examples += operation_stats.request_stats.num_examples + summary_stats.request_content_types.update(operation_stats.request_stats.content_types) + for _response_code, response_code_stats in operation_stats.response_stats.items(): + summary_stats.num_responses_with_body += response_code_stats.num_with_body + summary_stats.num_responses_with_examples += response_code_stats.num_with_examples + summary_stats.num_response_examples += response_code_stats.num_examples + summary_stats.response_content_types.update(response_code_stats.content_types) + return summary_stats + + def print_summary_stats(self, stats: SummaryStats): + print("=======================") + print("==== Summary Stats ====") + print("=======================") + 
print(f"Number of endpoints: {stats.num_endpoints}") + print(f"Number of operations: {stats.num_operations}") + print(" Requests:") + print(f" with body: {stats.num_requests_with_body}") + print(f" with examples: {stats.num_requests_with_examples}") + print(f" number of examples: {stats.num_request_examples}") + print(" Content types:") + for content_type in stats.request_content_types: + print(f" {content_type}") + print(" Responses:") + print(f" with body: {stats.num_responses_with_body}") + print(f' with examples: {stats.num_responses_with_examples}') + print(f" number of examples: {stats.num_response_examples}") + print(" Content types:") + for content_type in stats.response_content_types: + print(f" {content_type}") + print() \ No newline at end of file diff --git a/src/scripts/openapi-analyzer/constants.py b/src/scripts/openapi-analyzer/constants.py new file mode 100644 index 0000000000..fbcae0df89 --- /dev/null +++ b/src/scripts/openapi-analyzer/constants.py @@ -0,0 +1,14 @@ +DEFAULT_OUTPUT_PATH = "../../../output" +DEFAULT_OPENAPI_FOLDER = "openapi" +DEFAULT_OPENAPI_FILE = "elasticsearch-openapi.json" + +ENDPOINT_OPERATIONS = [ + "get", + "put", + "post", + "delete", + "options", + "head", + "patch", + "trace" +] diff --git a/src/scripts/openapi-analyzer/main.py b/src/scripts/openapi-analyzer/main.py new file mode 100755 index 0000000000..54ed784141 --- /dev/null +++ b/src/scripts/openapi-analyzer/main.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python + +import os +from constants import ( + DEFAULT_OUTPUT_PATH, + DEFAULT_OPENAPI_FOLDER, + DEFAULT_OPENAPI_FILE +) +from OpenapiAnalyzer import OpenapiAnalyzer + +def main(): + openpi_filepath = os.path.join(DEFAULT_OUTPUT_PATH, + DEFAULT_OPENAPI_FOLDER, + DEFAULT_OPENAPI_FILE) + if not os.path.exists(openpi_filepath): + print(f"OpenAPI file not found: {openpi_filepath}") + return + openapi_analyzer = OpenapiAnalyzer(openpi_filepath) + openapi_analyzer.run() + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/src/scripts/openapi-analyzer/requirements.txt b/src/scripts/openapi-analyzer/requirements.txt new file mode 100644 index 0000000000..b3285e9f0f --- /dev/null +++ b/src/scripts/openapi-analyzer/requirements.txt @@ -0,0 +1,2 @@ + +openapi-pydantic==0.5.1 \ No newline at end of file diff --git a/typescript-generator/src/metamodel.ts b/typescript-generator/src/metamodel.ts index 9f01d7a956..37a0de06ed 100644 --- a/typescript-generator/src/metamodel.ts +++ b/typescript-generator/src/metamodel.ts @@ -260,6 +260,19 @@ export class Interface extends BaseType { variants?: Container } +/** + * The Example type is used for both requests and responses + * This type definition is taken from the OpenAPI spec + * https://spec.openapis.org/oas/v3.1.0#example-object + * With the exception of using String as the 'value' type + */ +export class Example { + summary?: string + description?: string + value?: string + external_value?: string +} + /** * A request type */ @@ -288,6 +301,7 @@ export class Request extends BaseType { body: Body behaviors?: Behavior[] attachedBehaviors?: string[] + examples?: Map } /** @@ -300,6 +314,7 @@ export class Response extends BaseType { behaviors?: Behavior[] attachedBehaviors?: string[] exceptions?: ResponseException[] + examples?: Map } export class ResponseException {