diff --git a/elasticsearch_7/__init__.py b/elasticsearch_7/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..80c23562a32fefde8fcec106d0c155ae2f13b4aa
--- /dev/null
+++ b/elasticsearch_7/__init__.py
@@ -0,0 +1,80 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+# flake8: noqa
+from __future__ import absolute_import
+
+VERSION = (7, 8, 0)
+__version__ = VERSION
+__versionstr__ = "7.8.0"
+
+import sys
+import logging
+import warnings
+
+logger = logging.getLogger("elasticsearch")
+logger.addHandler(logging.NullHandler())
+
+from .client import Elasticsearch
+from .transport import Transport
+from .connection_pool import ConnectionPool, ConnectionSelector, RoundRobinSelector
+from .serializer import JSONSerializer
+from .connection import Connection, RequestsHttpConnection, Urllib3HttpConnection
+from .exceptions import (
+    ImproperlyConfigured,
+    ElasticsearchException,
+    SerializationError,
+    TransportError,
+    NotFoundError,
+    ConflictError,
+    RequestError,
+    ConnectionError,
+    SSLError,
+    ConnectionTimeout,
+    AuthenticationException,
+    AuthorizationException,
+    ElasticsearchDeprecationWarning,
+)
+
+# Only raise one warning per deprecation message so as not
+# to spam the user when the same action is performed multiple times.
+warnings.simplefilter("default", category=ElasticsearchDeprecationWarning, append=True)
+
+__all__ = [
+    "Elasticsearch",
+    "Transport",
+    "ConnectionPool",
+    "ConnectionSelector",
+    "RoundRobinSelector",
+    "JSONSerializer",
+    "Connection",
+    "RequestsHttpConnection",
+    "Urllib3HttpConnection",
+    "ImproperlyConfigured",
+    "ElasticsearchException",
+    "SerializationError",
+    "TransportError",
+    "NotFoundError",
+    "ConflictError",
+    "RequestError",
+    "ConnectionError",
+    "SSLError",
+    "ConnectionTimeout",
+    "AuthenticationException",
+    "AuthorizationException",
+    "ElasticsearchDeprecationWarning",
+]
+
+try:
+    # Asyncio only supported on Python 3.6+
+    if sys.version_info < (3, 6):
+        raise ImportError
+
+    from ._async.http_aiohttp import AIOHttpConnection
+    from ._async.transport import AsyncTransport
+    from ._async.client import AsyncElasticsearch
+
+    __all__ += ["AIOHttpConnection", "AsyncTransport", "AsyncElasticsearch"]
+except (ImportError, SyntaxError):
+    pass
diff --git a/elasticsearch_7/__pycache__/__init__.cpython-38.pyc b/elasticsearch_7/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7beb7cd21f19a92050724dfc2a90413939e4f9c1
Binary files /dev/null and b/elasticsearch_7/__pycache__/__init__.cpython-38.pyc differ
diff --git a/elasticsearch_7/__pycache__/compat.cpython-38.pyc b/elasticsearch_7/__pycache__/compat.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f41e84623f37fe58430fe8e3924c4fca5c678747
Binary files /dev/null and b/elasticsearch_7/__pycache__/compat.cpython-38.pyc differ
diff --git a/elasticsearch_7/__pycache__/connection_pool.cpython-38.pyc b/elasticsearch_7/__pycache__/connection_pool.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f077399aa5967373e7c1a0c89297c410698f525b
Binary files /dev/null and b/elasticsearch_7/__pycache__/connection_pool.cpython-38.pyc differ
diff --git a/elasticsearch_7/__pycache__/exceptions.cpython-38.pyc b/elasticsearch_7/__pycache__/exceptions.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d9cca1c7f11dbb5a8f9641b148875cb5b7b43e32
Binary files /dev/null and b/elasticsearch_7/__pycache__/exceptions.cpython-38.pyc differ
diff --git a/elasticsearch_7/__pycache__/serializer.cpython-38.pyc b/elasticsearch_7/__pycache__/serializer.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..486923495d2b2f9580de7ab4061aaa57df7beeb0
Binary files /dev/null and b/elasticsearch_7/__pycache__/serializer.cpython-38.pyc differ
diff --git a/elasticsearch_7/__pycache__/transport.cpython-38.pyc b/elasticsearch_7/__pycache__/transport.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1e97be4a241cb9f2ef70ed3009caf635cfaeed3f
Binary files /dev/null and b/elasticsearch_7/__pycache__/transport.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/__init__.py b/elasticsearch_7/_async/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a3c439ef6960c58744d8611e7ba305dc3cc1c62
--- /dev/null
+++ b/elasticsearch_7/_async/__init__.py
@@ -0,0 +1,3 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
diff --git a/elasticsearch_7/_async/__pycache__/__init__.cpython-38.pyc b/elasticsearch_7/_async/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..14b9b105ca4f801787fd3545ea13a983bc2a994f
Binary files /dev/null and b/elasticsearch_7/_async/__pycache__/__init__.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/__pycache__/compat.cpython-38.pyc b/elasticsearch_7/_async/__pycache__/compat.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..db744f2907064be6727bd833e9fddd4f61341c42
Binary files /dev/null and b/elasticsearch_7/_async/__pycache__/compat.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/__pycache__/helpers.cpython-38.pyc b/elasticsearch_7/_async/__pycache__/helpers.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f1129fc05b56e356870faac35d3c83d55b7a81c1
Binary files /dev/null and b/elasticsearch_7/_async/__pycache__/helpers.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/__pycache__/http_aiohttp.cpython-38.pyc b/elasticsearch_7/_async/__pycache__/http_aiohttp.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..13baf7e8c2e771276d4ec5c6f5da39b14b522472
Binary files /dev/null and b/elasticsearch_7/_async/__pycache__/http_aiohttp.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/__pycache__/transport.cpython-38.pyc b/elasticsearch_7/_async/__pycache__/transport.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4d88cbd5b781fcfaef45edf2074d020b970f1ea0
Binary files /dev/null and b/elasticsearch_7/_async/__pycache__/transport.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/__init__.py b/elasticsearch_7/_async/client/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4409c46392b0840e32c23b0ae8c7653679043107
--- /dev/null
+++ b/elasticsearch_7/_async/client/__init__.py
@@ -0,0 +1,2011 @@
+# -*- coding: utf-8 -*-
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from __future__ import unicode_literals
+import logging
+
+from ..transport import AsyncTransport, TransportError
+from .indices import IndicesClient
+from .ingest import IngestClient
+from .cluster import ClusterClient
+from .cat import CatClient
+from .nodes import NodesClient
+from .remote import RemoteClient
+from .snapshot import SnapshotClient
+from .tasks import TasksClient
+from .xpack import XPackClient
+from .utils import query_params, _make_path, SKIP_IN_PATH, _bulk_body, _normalize_hosts
+
+# xpack APIs
+from .async_search import AsyncSearchClient
+from .autoscaling import AutoscalingClient
+from .ccr import CcrClient
+from .data_frame import Data_FrameClient
+from .deprecation import DeprecationClient
+from .eql import EqlClient
+from .graph import GraphClient
+from .ilm import IlmClient
+from .license import LicenseClient
+from .migration import MigrationClient
+from .ml import MlClient
+from .monitoring import MonitoringClient
+from .rollup import RollupClient
+from .security import SecurityClient
+from .sql import SqlClient
+from .ssl import SslClient
+from .watcher import WatcherClient
+from .enrich import EnrichClient
+from .searchable_snapshots import SearchableSnapshotsClient
+from .slm import SlmClient
+from .transform import TransformClient
+
+
+logger = logging.getLogger("elasticsearch")
+
+
+class AsyncElasticsearch(object):
+    """
+    Elasticsearch low-level client. Provides a straightforward mapping from
+    Python to ES REST endpoints.
+
+    The instance has attributes ``cat``, ``cluster``, ``indices``, ``ingest``,
+    ``nodes``, ``snapshot`` and ``tasks`` that provide access to instances of
+    :class:`~elasticsearch.client.CatClient`,
+    :class:`~elasticsearch.client.ClusterClient`,
+    :class:`~elasticsearch.client.IndicesClient`,
+    :class:`~elasticsearch.client.IngestClient`,
+    :class:`~elasticsearch.client.NodesClient`,
+    :class:`~elasticsearch.client.SnapshotClient` and
+    :class:`~elasticsearch.client.TasksClient` respectively. This is the
+    preferred (and only supported) way to get access to those classes and their
+    methods.
+
+    You can specify your own connection class which should be used by providing
+    the ``connection_class`` parameter::
+
+        # create connection to localhost using the RequestsHttpConnection
+        es = Elasticsearch(connection_class=RequestsHttpConnection)
+
+    If you want to turn on :ref:`sniffing` you have several options (described
+    in :class:`~elasticsearch.Transport`)::
+
+        # create connection that will automatically inspect the cluster to get
+        # the list of active nodes. Start with nodes running on 'esnode1' and
+        # 'esnode2'
+        es = Elasticsearch(
+            ['esnode1', 'esnode2'],
+            # sniff before doing anything
+            sniff_on_start=True,
+            # refresh nodes after a node fails to respond
+            sniff_on_connection_fail=True,
+            # and also every 60 seconds
+            sniffer_timeout=60
+        )
+
+    Different hosts can have different parameters, use a dictionary per node to
+    specify those::
+
+        # connect to localhost directly and another node using SSL on port 443
+        # and a url_prefix. Note that ``port`` needs to be an int.
+        es = Elasticsearch([
+            {'host': 'localhost'},
+            {'host': 'othernode', 'port': 443, 'url_prefix': 'es', 'use_ssl': True},
+        ])
+
+    If using SSL, there are several parameters that control how we deal with
+    certificates (see :class:`~elasticsearch.Urllib3HttpConnection` for
+    detailed description of the options)::
+
+        es = Elasticsearch(
+            ['localhost:443', 'other_host:443'],
+            # turn on SSL
+            use_ssl=True,
+            # make sure we verify SSL certificates
+            verify_certs=True,
+            # provide a path to CA certs on disk
+            ca_certs='/path/to/CA_certs'
+        )
+
+    If you use SSL but do not verify certificates, a warning is shown; it can
+    optionally be silenced (see :class:`~elasticsearch.Urllib3HttpConnection`
+    for a detailed description of the options)::
+
+        es = Elasticsearch(
+            ['localhost:443', 'other_host:443'],
+            # turn on SSL
+            use_ssl=True,
+            # don't verify SSL certificates
+            verify_certs=False,
+            # don't show warnings about ssl certs verification
+            ssl_show_warn=False
+        )
+
+    SSL client authentication is supported
+    (see :class:`~elasticsearch.Urllib3HttpConnection` for
+    detailed description of the options)::
+
+        es = Elasticsearch(
+            ['localhost:443', 'other_host:443'],
+            # turn on SSL
+            use_ssl=True,
+            # make sure we verify SSL certificates
+            verify_certs=True,
+            # provide a path to CA certs on disk
+            ca_certs='/path/to/CA_certs',
+            # PEM formatted SSL client certificate
+            client_cert='/path/to/clientcert.pem',
+            # PEM formatted SSL client key
+            client_key='/path/to/clientkey.pem'
+        )
+
+    Alternatively you can use RFC-1738 formatted URLs, as long as they are not
+    in conflict with other options::
+
+        es = Elasticsearch(
+            [
+                'http://user:secret@localhost:9200/',
+                'https://user:secret@other_host:443/production'
+            ],
+            verify_certs=True
+        )
+
+    By default, `JSONSerializer
+    <https://github.com/elastic/elasticsearch-py/blob/master/elasticsearch/serializer.py#L24>`_
+    is used to encode all outgoing requests.
+    However, you can implement your own custom serializer::
+
+        from elasticsearch.serializer import JSONSerializer
+
+        class SetEncoder(JSONSerializer):
+            def default(self, obj):
+                if isinstance(obj, set):
+                    return list(obj)
+                if isinstance(obj, Something):
+                    return 'CustomSomethingRepresentation'
+                return JSONSerializer.default(self, obj)
+
+        es = Elasticsearch(serializer=SetEncoder())
+
+    """
+
+    def __init__(self, hosts=None, transport_class=AsyncTransport, **kwargs):
+        """
+        :arg hosts: list of nodes, or a single node, we should connect to.
+            A node should either be a dictionary ({"host": "localhost", "port": 9200}),
+            in which case the entire dictionary is passed to the
+            :class:`~elasticsearch.Connection` class as kwargs, or a string in the
+            format of ``host[:port]``, which will be translated to a dictionary
+            automatically.  If no value is given the
+            :class:`~elasticsearch.Connection` class defaults will be used.
+
+        :arg transport_class: :class:`~elasticsearch.Transport` subclass to use.
+
+        :arg kwargs: any additional arguments will be passed on to the
+            :class:`~elasticsearch.Transport` class and, subsequently, to the
+            :class:`~elasticsearch.Connection` instances.
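+
+        A minimal construction sketch (the host names and port below are
+        illustrative placeholders)::
+
+            es = AsyncElasticsearch(
+                hosts=[
+                    {"host": "localhost", "port": 9200},
+                    "othernode:9200",
+                ]
+            )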
+        """
+        self.transport = transport_class(_normalize_hosts(hosts), **kwargs)
+
+        # namespaced clients for compatibility with API names
+        self.indices = IndicesClient(self)
+        self.ingest = IngestClient(self)
+        self.cluster = ClusterClient(self)
+        self.cat = CatClient(self)
+        self.nodes = NodesClient(self)
+        self.remote = RemoteClient(self)
+        self.snapshot = SnapshotClient(self)
+        self.tasks = TasksClient(self)
+
+        self.xpack = XPackClient(self)
+        self.async_search = AsyncSearchClient(self)
+        self.autoscaling = AutoscalingClient(self)
+        self.ccr = CcrClient(self)
+        self.data_frame = Data_FrameClient(self)
+        self.deprecation = DeprecationClient(self)
+        self.eql = EqlClient(self)
+        self.graph = GraphClient(self)
+        self.ilm = IlmClient(self)
+        self.license = LicenseClient(self)
+        self.migration = MigrationClient(self)
+        self.ml = MlClient(self)
+        self.monitoring = MonitoringClient(self)
+        self.rollup = RollupClient(self)
+        self.security = SecurityClient(self)
+        self.sql = SqlClient(self)
+        self.ssl = SslClient(self)
+        self.watcher = WatcherClient(self)
+        self.enrich = EnrichClient(self)
+        self.searchable_snapshots = SearchableSnapshotsClient(self)
+        self.slm = SlmClient(self)
+        self.transform = TransformClient(self)
+
+    def __repr__(self):
+        try:
+            # get a list of all connections
+            cons = self.transport.hosts
+            # truncate to 5 if there are too many
+            if len(cons) > 5:
+                cons = cons[:5] + ["..."]
+            return "<{cls}({cons})>".format(cls=self.__class__.__name__, cons=cons)
+        except Exception:
+            # probably operating on custom transport and connection_pool, ignore
+            return super(AsyncElasticsearch, self).__repr__()
+
+    async def __aenter__(self):
+        if hasattr(self.transport, "_async_call"):
+            await self.transport._async_call()
+        return self
+
+    async def __aexit__(self, *_):
+        await self.close()
+
+    async def close(self):
+        """Closes the Transport and all internal connections"""
+        await self.transport.close()
+
+    # AUTO-GENERATED-API-DEFINITIONS #
+    @query_params()
+    async def ping(self, params=None, headers=None):
+        """
+        Returns whether the cluster is running.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/index.html>`_
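+
+        Usage sketch (``es`` is assumed to be an ``AsyncElasticsearch``
+        instance)::
+
+            if not await es.ping():
+                print("cluster is not reachable")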
+        """
+        try:
+            return await self.transport.perform_request(
+                "HEAD", "/", params=params, headers=headers
+            )
+        except TransportError:
+            return False
+
+    @query_params()
+    async def info(self, params=None, headers=None):
+        """
+        Returns basic information about the cluster.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/index.html>`_
+        """
+        return await self.transport.perform_request(
+            "GET", "/", params=params, headers=headers
+        )
+
+    @query_params(
+        "pipeline",
+        "refresh",
+        "routing",
+        "timeout",
+        "version",
+        "version_type",
+        "wait_for_active_shards",
+    )
+    async def create(self, index, id, body, doc_type=None, params=None, headers=None):
+        """
+        Creates a new document in the index.  Returns a 409 response when a document
+        with the same ID already exists in the index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-index_.html>`_
+
+        :arg index: The name of the index
+        :arg id: Document ID
+        :arg body: The document
+        :arg doc_type: The type of the document
+        :arg pipeline: The pipeline id to preprocess incoming documents
+            with
+        :arg refresh: If `true` then refresh the affected shards to make
+            this operation visible to search, if `wait_for` then wait for a refresh
+            to make this operation visible to search, if `false` (the default) then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        :arg routing: Specific routing value
+        :arg timeout: Explicit operation timeout
+        :arg version: Explicit version number for concurrency control
+        :arg version_type: Specific version type  Valid choices:
+            internal, external, external_gte
+        :arg wait_for_active_shards: Sets the number of shard copies
+            that must be active before proceeding with the index operation. Defaults
+            to 1, meaning the primary shard only. Set to `all` for all shard copies,
+            otherwise set to any non-negative value less than or equal to the total
+            number of copies for the shard (number of replicas + 1)
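+
+        Illustrative sketch (``es`` is an assumed ``AsyncElasticsearch``
+        instance; the index name, ID and document are placeholders)::
+
+            await es.create(index="my-index", id="1", body={"title": "Hello"})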
+        """
+        for param in (index, id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        if doc_type in SKIP_IN_PATH:
+            path = _make_path(index, "_create", id)
+        else:
+            path = _make_path(index, doc_type, id, "_create")
+
+        return await self.transport.perform_request(
+            "PUT", path, params=params, headers=headers, body=body
+        )
+
+    @query_params(
+        "if_primary_term",
+        "if_seq_no",
+        "op_type",
+        "pipeline",
+        "refresh",
+        "routing",
+        "timeout",
+        "version",
+        "version_type",
+        "wait_for_active_shards",
+    )
+    async def index(
+        self, index, body, doc_type=None, id=None, params=None, headers=None
+    ):
+        """
+        Creates or updates a document in an index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-index_.html>`_
+
+        :arg index: The name of the index
+        :arg body: The document
+        :arg doc_type: The type of the document
+        :arg id: Document ID
+        :arg if_primary_term: only perform the index operation if the
+            last operation that has changed the document has the specified primary
+            term
+        :arg if_seq_no: only perform the index operation if the last
+            operation that has changed the document has the specified sequence
+            number
+        :arg op_type: Explicit operation type. Defaults to `index` for
+            requests with an explicit document ID, and to `create` for requests
+            without an explicit document ID  Valid choices: index, create
+        :arg pipeline: The pipeline id to preprocess incoming documents
+            with
+        :arg refresh: If `true` then refresh the affected shards to make
+            this operation visible to search, if `wait_for` then wait for a refresh
+            to make this operation visible to search, if `false` (the default) then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        :arg routing: Specific routing value
+        :arg timeout: Explicit operation timeout
+        :arg version: Explicit version number for concurrency control
+        :arg version_type: Specific version type  Valid choices:
+            internal, external, external_gte
+        :arg wait_for_active_shards: Sets the number of shard copies
+            that must be active before proceeding with the index operation. Defaults
+            to 1, meaning the primary shard only. Set to `all` for all shard copies,
+            otherwise set to any non-negative value less than or equal to the total
+            number of copies for the shard (number of replicas + 1)
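+
+        Illustrative sketch (names and values are placeholders)::
+
+            # with an explicit ID the document is sent with PUT
+            await es.index(index="my-index", id="1", body={"title": "Hello"})
+            # without an ID Elasticsearch generates one and POST is used
+            await es.index(index="my-index", body={"title": "World"})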
+        """
+        for param in (index, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        if doc_type is None:
+            doc_type = "_doc"
+
+        return await self.transport.perform_request(
+            "POST" if id in SKIP_IN_PATH else "PUT",
+            _make_path(index, doc_type, id),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params(
+        "_source",
+        "_source_excludes",
+        "_source_includes",
+        "pipeline",
+        "refresh",
+        "routing",
+        "timeout",
+        "wait_for_active_shards",
+    )
+    async def bulk(self, body, index=None, doc_type=None, params=None, headers=None):
+        """
+        Performs multiple index/update/delete operations in a single request.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-bulk.html>`_
+
+        :arg body: The operation definition and data (action-data
+            pairs), separated by newlines
+        :arg index: Default index for items which don't provide one
+        :arg doc_type: Default document type for items which don't
+            provide one
+        :arg _source: True or false to return the _source field or not,
+            or default list of fields to return, can be overridden on each sub-
+            request
+        :arg _source_excludes: Default list of fields to exclude from
+            the returned _source field, can be overridden on each sub-request
+        :arg _source_includes: Default list of fields to extract and
+            return from the _source field, can be overridden on each sub-request
+        :arg pipeline: The pipeline id to preprocess incoming documents
+            with
+        :arg refresh: If `true` then refresh the affected shards to make
+            this operation visible to search, if `wait_for` then wait for a refresh
+            to make this operation visible to search, if `false` (the default) then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        :arg routing: Specific routing value
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_active_shards: Sets the number of shard copies
+            that must be active before proceeding with the bulk operation. Defaults
+            to 1, meaning the primary shard only. Set to `all` for all shard copies,
+            otherwise set to any non-negative value less than or equal to the total
+            number of copies for the shard (number of replicas + 1)
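+
+        Illustrative sketch (``es`` is an assumed ``AsyncElasticsearch``
+        instance; names and values are placeholders). The body can be a
+        pre-built newline-delimited string or, as sketched here, a sequence of
+        actions that ``_bulk_body`` is expected to serialize and join::
+
+            actions = [
+                {"index": {"_index": "my-index", "_id": "1"}},
+                {"title": "Hello"},
+                {"delete": {"_index": "my-index", "_id": "2"}},
+            ]
+            await es.bulk(body=actions)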
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        body = _bulk_body(self.transport.serializer, body)
+        return await self.transport.perform_request(
+            "POST",
+            _make_path(index, doc_type, "_bulk"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    async def clear_scroll(self, body=None, scroll_id=None, params=None, headers=None):
+        """
+        Explicitly clears the search context for a scroll.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/search-request-body.html#_clear_scroll_api>`_
+
+        :arg body: A comma-separated list of scroll IDs to clear if none
+            was specified via the scroll_id parameter
+        :arg scroll_id: A comma-separated list of scroll IDs to clear
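+
+        Usage sketch (the scroll ID shown is a placeholder)::
+
+            await es.clear_scroll(scroll_id="<scroll id returned by a search>")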
+        """
+        if scroll_id in SKIP_IN_PATH and body in SKIP_IN_PATH:
+            raise ValueError("You need to supply scroll_id or body.")
+        elif scroll_id and not body:
+            body = {"scroll_id": [scroll_id]}
+        elif scroll_id:
+            params["scroll_id"] = scroll_id
+
+        return await self.transport.perform_request(
+            "DELETE", "/_search/scroll", params=params, headers=headers, body=body
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "analyze_wildcard",
+        "analyzer",
+        "default_operator",
+        "df",
+        "expand_wildcards",
+        "ignore_throttled",
+        "ignore_unavailable",
+        "lenient",
+        "min_score",
+        "preference",
+        "q",
+        "routing",
+        "terminate_after",
+    )
+    async def count(
+        self, body=None, index=None, doc_type=None, params=None, headers=None
+    ):
+        """
+        Returns the number of documents matching a query.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/search-count.html>`_
+
+        :arg body: A query to restrict the results specified with the
+            Query DSL (optional)
+        :arg index: A comma-separated list of indices to restrict the
+            results
+        :arg doc_type: A comma-separated list of types to restrict the
+            results
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg analyze_wildcard: Specify whether wildcard and prefix
+            queries should be analyzed (default: false)
+        :arg analyzer: The analyzer to use for the query string
+        :arg default_operator: The default operator for query string
+            query (AND or OR)  Valid choices: AND, OR  Default: OR
+        :arg df: The field to use as default where no field prefix is
+            given in the query string
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg ignore_throttled: Whether specified concrete, expanded or
+            aliased indices should be ignored when throttled
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg lenient: Specify whether format-based query failures (such
+            as providing text to a numeric field) should be ignored
+        :arg min_score: Include only documents with a specific `_score`
+            value in the result
+        :arg preference: Specify the node or shard the operation should
+            be performed on (default: random)
+        :arg q: Query in the Lucene query string syntax
+        :arg routing: A comma-separated list of specific routing values
+        :arg terminate_after: The maximum count for each shard, upon
+            reaching which the query execution will terminate early
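+
+        Illustrative sketch (index and field names are placeholders)::
+
+            resp = await es.count(
+                index="my-index",
+                body={"query": {"match": {"title": "hello"}}},
+            )
+            print(resp["count"])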
+        """
+        return await self.transport.perform_request(
+            "POST",
+            _make_path(index, doc_type, "_count"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params(
+        "if_primary_term",
+        "if_seq_no",
+        "refresh",
+        "routing",
+        "timeout",
+        "version",
+        "version_type",
+        "wait_for_active_shards",
+    )
+    async def delete(self, index, id, doc_type=None, params=None, headers=None):
+        """
+        Removes a document from the index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-delete.html>`_
+
+        :arg index: The name of the index
+        :arg id: The document ID
+        :arg doc_type: The type of the document
+        :arg if_primary_term: only perform the delete operation if the
+            last operation that has changed the document has the specified primary
+            term
+        :arg if_seq_no: only perform the delete operation if the last
+            operation that has changed the document has the specified sequence
+            number
+        :arg refresh: If `true` then refresh the affected shards to make
+            this operation visible to search, if `wait_for` then wait for a refresh
+            to make this operation visible to search, if `false` (the default) then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        :arg routing: Specific routing value
+        :arg timeout: Explicit operation timeout
+        :arg version: Explicit version number for concurrency control
+        :arg version_type: Specific version type  Valid choices:
+            internal, external, external_gte, force
+        :arg wait_for_active_shards: Sets the number of shard copies
+            that must be active before proceeding with the delete operation.
+            Defaults to 1, meaning the primary shard only. Set to `all` for all
+            shard copies, otherwise set to any non-negative value less than or equal
+            to the total number of copies for the shard (number of replicas + 1)
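+
+        Illustrative sketch (index name and ID are placeholders)::
+
+            await es.delete(index="my-index", id="1")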
+        """
+        for param in (index, id):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        if doc_type in SKIP_IN_PATH:
+            doc_type = "_doc"
+
+        return await self.transport.perform_request(
+            "DELETE", _make_path(index, doc_type, id), params=params, headers=headers
+        )
+
+    @query_params(
+        "_source",
+        "_source_excludes",
+        "_source_includes",
+        "allow_no_indices",
+        "analyze_wildcard",
+        "analyzer",
+        "conflicts",
+        "default_operator",
+        "df",
+        "expand_wildcards",
+        "from_",
+        "ignore_unavailable",
+        "lenient",
+        "max_docs",
+        "preference",
+        "q",
+        "refresh",
+        "request_cache",
+        "requests_per_second",
+        "routing",
+        "scroll",
+        "scroll_size",
+        "search_timeout",
+        "search_type",
+        "size",
+        "slices",
+        "sort",
+        "stats",
+        "terminate_after",
+        "timeout",
+        "version",
+        "wait_for_active_shards",
+        "wait_for_completion",
+    )
+    async def delete_by_query(
+        self, index, body, doc_type=None, params=None, headers=None
+    ):
+        """
+        Deletes documents matching the provided query.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-delete-by-query.html>`_
+
+        :arg index: A comma-separated list of index names to search; use
+            `_all` or empty string to perform the operation on all indices
+        :arg body: The search definition using the Query DSL
+        :arg doc_type: A comma-separated list of document types to
+            search; leave empty to perform the operation on all types
+        :arg _source: True or false to return the _source field or not,
+            or a list of fields to return
+        :arg _source_excludes: A list of fields to exclude from the
+            returned _source field
+        :arg _source_includes: A list of fields to extract and return
+            from the _source field
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg analyze_wildcard: Specify whether wildcard and prefix
+            queries should be analyzed (default: false)
+        :arg analyzer: The analyzer to use for the query string
+        :arg conflicts: What to do when the delete by query hits version
+            conflicts?  Valid choices: abort, proceed  Default: abort
+        :arg default_operator: The default operator for query string
+            query (AND or OR)  Valid choices: AND, OR  Default: OR
+        :arg df: The field to use as default where no field prefix is
+            given in the query string
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg from_: Starting offset (default: 0)
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg lenient: Specify whether format-based query failures (such
+            as providing text to a numeric field) should be ignored
+        :arg max_docs: Maximum number of documents to process (default:
+            all documents)
+        :arg preference: Specify the node or shard the operation should
+            be performed on (default: random)
+        :arg q: Query in the Lucene query string syntax
+        :arg refresh: Should the affected indexes be refreshed?
+        :arg request_cache: Specify if request cache should be used for
+            this request or not, defaults to index level setting
+        :arg requests_per_second: The throttle for this request in sub-
+            requests per second. -1 means no throttle.
+        :arg routing: A comma-separated list of specific routing values
+        :arg scroll: Specify how long a consistent view of the index
+            should be maintained for scrolled search
+        :arg scroll_size: Size on the scroll request powering the delete
+            by query  Default: 100
+        :arg search_timeout: Explicit timeout for each search request.
+            Defaults to no timeout.
+        :arg search_type: Search operation type  Valid choices:
+            query_then_fetch, dfs_query_then_fetch
+        :arg size: Deprecated, please use `max_docs` instead
+        :arg slices: The number of slices this task should be divided
+            into. Defaults to 1, meaning the task isn't sliced into subtasks. Can be
+            set to `auto`.  Default: 1
+        :arg sort: A comma-separated list of <field>:<direction> pairs
+        :arg stats: Specific 'tag' of the request for logging and
+            statistical purposes
+        :arg terminate_after: The maximum number of documents to collect
+            for each shard, upon reaching which the query execution will terminate
+            early.
+        :arg timeout: Time each individual bulk request should wait for
+            shards that are unavailable.  Default: 1m
+        :arg version: Specify whether to return document version as part
+            of a hit
+        :arg wait_for_active_shards: Sets the number of shard copies
+            that must be active before proceeding with the delete by query
+            operation. Defaults to 1, meaning the primary shard only. Set to `all`
+            for all shard copies, otherwise set to any non-negative value less than
+            or equal to the total number of copies for the shard (number of replicas
+            + 1)
+        :arg wait_for_completion: Whether the request should block until
+            the delete by query is complete.  Default: True
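+
+        Illustrative sketch (index, field and value are placeholders)::
+
+            await es.delete_by_query(
+                index="my-index",
+                body={"query": {"match": {"status": "stale"}}},
+            )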
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        for param in (index, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path(index, doc_type, "_delete_by_query"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("requests_per_second")
+    async def delete_by_query_rethrottle(self, task_id, params=None, headers=None):
+        """
+        Changes the number of requests per second for a particular Delete By Query
+        operation.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-delete-by-query.html>`_
+
+        :arg task_id: The task id to rethrottle
+        :arg requests_per_second: The throttle to set on this request in
+            floating sub-requests per second. -1 means set no throttle.
+        """
+        if task_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'task_id'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_delete_by_query", task_id, "_rethrottle"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("master_timeout", "timeout")
+    async def delete_script(self, id, params=None, headers=None):
+        """
+        Deletes a script.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/modules-scripting.html>`_
+
+        :arg id: Script ID
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'id'.")
+
+        return await self.transport.perform_request(
+            "DELETE", _make_path("_scripts", id), params=params, headers=headers
+        )
+
+    @query_params(
+        "_source",
+        "_source_excludes",
+        "_source_includes",
+        "preference",
+        "realtime",
+        "refresh",
+        "routing",
+        "stored_fields",
+        "version",
+        "version_type",
+    )
+    async def exists(self, index, id, doc_type=None, params=None, headers=None):
+        """
+        Returns information about whether a document exists in an index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-get.html>`_
+
+        :arg index: The name of the index
+        :arg id: The document ID
+        :arg doc_type: The type of the document (use `_all` to fetch the
+            first document matching the ID across all types)
+        :arg _source: True or false to return the _source field or not,
+            or a list of fields to return
+        :arg _source_excludes: A list of fields to exclude from the
+            returned _source field
+        :arg _source_includes: A list of fields to extract and return
+            from the _source field
+        :arg preference: Specify the node or shard the operation should
+            be performed on (default: random)
+        :arg realtime: Specify whether to perform the operation in
+            realtime or search mode
+        :arg refresh: Refresh the shard containing the document before
+            performing the operation
+        :arg routing: Specific routing value
+        :arg stored_fields: A comma-separated list of stored fields to
+            return in the response
+        :arg version: Explicit version number for concurrency control
+        :arg version_type: Specific version type  Valid choices:
+            internal, external, external_gte, force
+        """
+        for param in (index, id):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        if doc_type in SKIP_IN_PATH:
+            doc_type = "_doc"
+
+        return await self.transport.perform_request(
+            "HEAD", _make_path(index, doc_type, id), params=params, headers=headers
+        )
+
+    @query_params(
+        "_source",
+        "_source_excludes",
+        "_source_includes",
+        "preference",
+        "realtime",
+        "refresh",
+        "routing",
+        "version",
+        "version_type",
+    )
+    async def exists_source(self, index, id, doc_type=None, params=None, headers=None):
+        """
+        Returns information about whether a document source exists in an index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-get.html>`_
+
+        :arg index: The name of the index
+        :arg id: The document ID
+        :arg doc_type: The type of the document; deprecated and optional
+            starting with 7.0
+        :arg _source: True or false to return the _source field or not,
+            or a list of fields to return
+        :arg _source_excludes: A list of fields to exclude from the
+            returned _source field
+        :arg _source_includes: A list of fields to extract and return
+            from the _source field
+        :arg preference: Specify the node or shard the operation should
+            be performed on (default: random)
+        :arg realtime: Specify whether to perform the operation in
+            realtime or search mode
+        :arg refresh: Refresh the shard containing the document before
+            performing the operation
+        :arg routing: Specific routing value
+        :arg version: Explicit version number for concurrency control
+        :arg version_type: Specific version type  Valid choices:
+            internal, external, external_gte, force
+        """
+        for param in (index, id):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        if doc_type in SKIP_IN_PATH:
+            path = _make_path(index, "_source", id)
+        else:
+            path = _make_path(index, doc_type, id, "_source")
+
+        return await self.transport.perform_request(
+            "HEAD", path, params=params, headers=headers
+        )
+
+    @query_params(
+        "_source",
+        "_source_excludes",
+        "_source_includes",
+        "analyze_wildcard",
+        "analyzer",
+        "default_operator",
+        "df",
+        "lenient",
+        "preference",
+        "q",
+        "routing",
+        "stored_fields",
+    )
+    async def explain(
+        self, index, id, body=None, doc_type=None, params=None, headers=None
+    ):
+        """
+        Returns information about why a specific document matches (or doesn't match) a query.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/search-explain.html>`_
+
+        :arg index: The name of the index
+        :arg id: The document ID
+        :arg body: The query definition using the Query DSL
+        :arg doc_type: The type of the document
+        :arg _source: True or false to return the _source field or not,
+            or a list of fields to return
+        :arg _source_excludes: A list of fields to exclude from the
+            returned _source field
+        :arg _source_includes: A list of fields to extract and return
+            from the _source field
+        :arg analyze_wildcard: Specify whether wildcards and prefix
+            queries in the query string query should be analyzed (default: false)
+        :arg analyzer: The analyzer for the query string query
+        :arg default_operator: The default operator for query string
+            query (AND or OR)  Valid choices: AND, OR  Default: OR
+        :arg df: The default field for query string query (default:
+            _all)
+        :arg lenient: Specify whether format-based query failures (such
+            as providing text to a numeric field) should be ignored
+        :arg preference: Specify the node or shard the operation should
+            be performed on (default: random)
+        :arg q: Query in the Lucene query string syntax
+        :arg routing: Specific routing value
+        :arg stored_fields: A comma-separated list of stored fields to
+            return in the response
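+
+        Illustrative sketch (index, ID and query values are placeholders)::
+
+            await es.explain(
+                index="my-index",
+                id="1",
+                body={"query": {"match": {"title": "hello"}}},
+            )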
+        """
+        for param in (index, id):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        if doc_type in SKIP_IN_PATH:
+            path = _make_path(index, "_explain", id)
+        else:
+            path = _make_path(index, doc_type, id, "_explain")
+
+        return await self.transport.perform_request(
+            "POST", path, params=params, headers=headers, body=body
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "fields",
+        "ignore_unavailable",
+        "include_unmapped",
+    )
+    async def field_caps(self, index=None, params=None, headers=None):
+        """
+        Returns information about the capabilities of fields among multiple
+        indices.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/search-field-caps.html>`_
+
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string to perform the operation on all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg fields: A comma-separated list of field names
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg include_unmapped: Indicates whether unmapped fields should
+            be included in the response.
+        """
+        return await self.transport.perform_request(
+            "GET", _make_path(index, "_field_caps"), params=params, headers=headers
+        )
+
+    @query_params(
+        "_source",
+        "_source_excludes",
+        "_source_includes",
+        "preference",
+        "realtime",
+        "refresh",
+        "routing",
+        "stored_fields",
+        "version",
+        "version_type",
+    )
+    async def get(self, index, id, doc_type=None, params=None, headers=None):
+        """
+        Returns a document.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-get.html>`_
+
+        :arg index: The name of the index
+        :arg id: The document ID
+        :arg doc_type: The type of the document (use `_all` to fetch the
+            first document matching the ID across all types)
+        :arg _source: True or false to return the _source field or not,
+            or a list of fields to return
+        :arg _source_excludes: A list of fields to exclude from the
+            returned _source field
+        :arg _source_includes: A list of fields to extract and return
+            from the _source field
+        :arg preference: Specify the node or shard the operation should
+            be performed on (default: random)
+        :arg realtime: Specify whether to perform the operation in
+            realtime or search mode
+        :arg refresh: Refresh the shard containing the document before
+            performing the operation
+        :arg routing: Specific routing value
+        :arg stored_fields: A comma-separated list of stored fields to
+            return in the response
+        :arg version: Explicit version number for concurrency control
+        :arg version_type: Specific version type  Valid choices:
+            internal, external, external_gte, force
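+
+        Illustrative sketch (index name and ID are placeholders)::
+
+            doc = await es.get(index="my-index", id="1")
+            source = doc["_source"]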
+        """
+        for param in (index, id):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        if doc_type in SKIP_IN_PATH:
+            doc_type = "_doc"
+
+        return await self.transport.perform_request(
+            "GET", _make_path(index, doc_type, id), params=params, headers=headers
+        )
+
+    @query_params("master_timeout")
+    async def get_script(self, id, params=None, headers=None):
+        """
+        Returns a script.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/modules-scripting.html>`_
+
+        :arg id: Script ID
+        :arg master_timeout: Specify timeout for connection to master
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'id'.")
+
+        return await self.transport.perform_request(
+            "GET", _make_path("_scripts", id), params=params, headers=headers
+        )
+
+    @query_params(
+        "_source",
+        "_source_excludes",
+        "_source_includes",
+        "preference",
+        "realtime",
+        "refresh",
+        "routing",
+        "version",
+        "version_type",
+    )
+    async def get_source(self, index, id, doc_type=None, params=None, headers=None):
+        """
+        Returns the source of a document.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-get.html>`_
+
+        :arg index: The name of the index
+        :arg id: The document ID
+        :arg doc_type: The type of the document; deprecated and optional
+            starting with 7.0
+        :arg _source: True or false to return the _source field or not,
+            or a list of fields to return
+        :arg _source_excludes: A list of fields to exclude from the
+            returned _source field
+        :arg _source_includes: A list of fields to extract and return
+            from the _source field
+        :arg preference: Specify the node or shard the operation should
+            be performed on (default: random)
+        :arg realtime: Specify whether to perform the operation in
+            realtime or search mode
+        :arg refresh: Refresh the shard containing the document before
+            performing the operation
+        :arg routing: Specific routing value
+        :arg version: Explicit version number for concurrency control
+        :arg version_type: Specific version type  Valid choices:
+            internal, external, external_gte, force
+        """
+        for param in (index, id):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        if doc_type in SKIP_IN_PATH:
+            path = _make_path(index, "_source", id)
+        else:
+            path = _make_path(index, doc_type, id, "_source")
+
+        return await self.transport.perform_request(
+            "GET", path, params=params, headers=headers
+        )
+
+    @query_params(
+        "_source",
+        "_source_excludes",
+        "_source_includes",
+        "preference",
+        "realtime",
+        "refresh",
+        "routing",
+        "stored_fields",
+    )
+    async def mget(self, body, index=None, doc_type=None, params=None, headers=None):
+        """
+        Retrieves multiple documents in one request.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-multi-get.html>`_
+
+        :arg body: Document identifiers; can be either `docs`
+            (containing full document information) or `ids` (when index and type
+            are provided in the URL).
+        :arg index: The name of the index
+        :arg doc_type: The type of the document
+        :arg _source: True or false to return the _source field or not,
+            or a list of fields to return
+        :arg _source_excludes: A list of fields to exclude from the
+            returned _source field
+        :arg _source_includes: A list of fields to extract and return
+            from the _source field
+        :arg preference: Specify the node or shard the operation should
+            be performed on (default: random)
+        :arg realtime: Specify whether to perform the operation in
+            realtime or search mode
+        :arg refresh: Refresh the shard containing the document before
+            performing the operation
+        :arg routing: Specific routing value
+        :arg stored_fields: A comma-separated list of stored fields to
+            return in the response
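+
+        Illustrative sketch (index name and IDs are placeholders); when
+        ``index`` is given the body may simply list ``ids``::
+
+            resp = await es.mget(index="my-index", body={"ids": ["1", "2"]})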
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path(index, doc_type, "_mget"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params(
+        "ccs_minimize_roundtrips",
+        "max_concurrent_searches",
+        "max_concurrent_shard_requests",
+        "pre_filter_shard_size",
+        "rest_total_hits_as_int",
+        "search_type",
+        "typed_keys",
+    )
+    async def msearch(self, body, index=None, doc_type=None, params=None, headers=None):
+        """
+        Executes several search operations in one request.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/search-multi-search.html>`_
+
+        :arg body: The request definitions (metadata-search request
+            definition pairs), separated by newlines
+        :arg index: A comma-separated list of index names to use as
+            default
+        :arg doc_type: A comma-separated list of document types to use
+            as default
+        :arg ccs_minimize_roundtrips: Indicates whether network round-
+            trips should be minimized as part of cross-cluster search requests
+            execution  Default: true
+        :arg max_concurrent_searches: Controls the maximum number of
+            concurrent searches the multi search api will execute
+        :arg max_concurrent_shard_requests: The number of concurrent
+            shard requests each sub search executes concurrently per node. This
+            value should be used to limit the impact of the search on the cluster in
+            order to limit the number of concurrent shard requests  Default: 5
+        :arg pre_filter_shard_size: A threshold that enforces a pre-
+            filter roundtrip to prefilter search shards based on query rewriting if
+            the number of shards the search request expands to exceeds the
+            threshold. This filter roundtrip can limit the number of shards
+            significantly if for instance a shard can not match any documents based
+            on its rewrite method ie. if date filters are mandatory to match but the
+            shard bounds and the query are disjoint.
+        :arg rest_total_hits_as_int: Indicates whether hits.total should
+            be rendered as an integer or an object in the rest search response
+        :arg search_type: Search operation type  Valid choices:
+            query_then_fetch, query_and_fetch, dfs_query_then_fetch,
+            dfs_query_and_fetch
+        :arg typed_keys: Specify whether aggregation and suggester names
+            should be prefixed by their respective types in the response
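+
+        Illustrative sketch (index and query values are placeholders). The
+        body alternates header and search-request lines, here given as a
+        sequence that ``_bulk_body`` is expected to serialize and join::
+
+            body = [
+                {},
+                {"query": {"match_all": {}}},
+                {},
+                {"query": {"match": {"title": "hello"}}},
+            ]
+            resp = await es.msearch(index="my-index", body=body)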
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        body = _bulk_body(self.transport.serializer, body)
+        return await self.transport.perform_request(
+            "POST",
+            _make_path(index, doc_type, "_msearch"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("master_timeout", "timeout")
+    async def put_script(self, id, body, context=None, params=None, headers=None):
+        """
+        Creates or updates a script.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/modules-scripting.html>`_
+
+        :arg id: Script ID
+        :arg body: The document
+        :arg context: Context name to compile script against
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        """
+        for param in (id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_scripts", id, context),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
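+    # Illustrative usage sketch (hypothetical `es` client and script id; the body
+    # follows the stored-script layout with a top-level "script" object):
+    #
+    #     await es.put_script(
+    #         id="my-calc",
+    #         body={"script": {"lang": "painless", "source": "doc['rank'].value * 2"}},
+    #     )
+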
+    @query_params(
+        "allow_no_indices", "expand_wildcards", "ignore_unavailable", "search_type"
+    )
+    async def rank_eval(self, body, index=None, params=None, headers=None):
+        """
+        Allows evaluating the quality of ranked search results over a set of typical
+        search queries.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/search-rank-eval.html>`_
+
+        :arg body: The ranking evaluation search definition, including
+            search requests, document ratings and ranking metric definition.
+        :arg index: A comma-separated list of index names to search; use
+            `_all` or empty string to perform the operation on all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg search_type: Search operation type  Valid choices:
+            query_then_fetch, dfs_query_then_fetch
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path(index, "_rank_eval"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params(
+        "max_docs",
+        "refresh",
+        "requests_per_second",
+        "scroll",
+        "slices",
+        "timeout",
+        "wait_for_active_shards",
+        "wait_for_completion",
+    )
+    async def reindex(self, body, params=None, headers=None):
+        """
+        Allows to copy documents from one index to another, optionally filtering the
+        source documents by a query, changing the destination index settings, or
+        fetching the documents from a remote cluster.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-reindex.html>`_
+
+        :arg body: The search definition using the Query DSL and the
+            prototype for the index request.
+        :arg max_docs: Maximum number of documents to process (default:
+            all documents)
+        :arg refresh: Should the affected indexes be refreshed?
+        :arg requests_per_second: The throttle to set on this request in
+            sub-requests per second. -1 means no throttle.
+        :arg scroll: Control how long to keep the search context alive
+            Default: 5m
+        :arg slices: The number of slices this task should be divided
+            into. Defaults to 1, meaning the task isn't sliced into subtasks. Can be
+            set to `auto`.  Default: 1
+        :arg timeout: Time each individual bulk request should wait for
+            shards that are unavailable.  Default: 1m
+        :arg wait_for_active_shards: Sets the number of shard copies
+            that must be active before proceeding with the reindex operation.
+            Defaults to 1, meaning the primary shard only. Set to `all` for all
+            shard copies, otherwise set to any non-negative value less than or equal
+            to the total number of copies for the shard (number of replicas + 1)
+        :arg wait_for_completion: Whether the request should block until
+            the reindex is complete.  Default: True
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return await self.transport.perform_request(
+            "POST", "/_reindex", params=params, headers=headers, body=body
+        )
+
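+    # Illustrative usage sketch (hypothetical `es` client and index names; the
+    # body uses the standard "source"/"dest" layout from the reindex docs):
+    #
+    #     await es.reindex(
+    #         body={"source": {"index": "old-index"}, "dest": {"index": "new-index"}},
+    #         wait_for_completion=True,
+    #     )
+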
+    @query_params("requests_per_second")
+    async def reindex_rethrottle(self, task_id, params=None, headers=None):
+        """
+        Changes the number of requests per second for a particular Reindex operation.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-reindex.html>`_
+
+        :arg task_id: The task id to rethrottle
+        :arg requests_per_second: The throttle to set on this request in
+            sub-requests per second, as a floating point number. -1 means no
+            throttle.
+        """
+        if task_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'task_id'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_reindex", task_id, "_rethrottle"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    async def render_search_template(
+        self, body=None, id=None, params=None, headers=None
+    ):
+        """
+        Allows to use the Mustache language to pre-render a search definition.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/search-template.html#_validating_templates>`_
+
+        :arg body: The search definition template and its params
+        :arg id: The id of the stored search template
+        """
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_render", "template", id),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    async def scripts_painless_execute(self, body=None, params=None, headers=None):
+        """
+        Allows an arbitrary script to be executed and a result to be returned.
+        `<https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-execute-api.html>`_
+
+        :arg body: The script to execute
+        """
+        return await self.transport.perform_request(
+            "POST",
+            "/_scripts/painless/_execute",
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
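+    # Illustrative usage sketch (hypothetical `es` client; the body shape follows
+    # the painless execute API with a top-level "script" object):
+    #
+    #     resp = await es.scripts_painless_execute(
+    #         body={
+    #             "script": {
+    #                 "source": "params.count / params.total",
+    #                 "params": {"count": 1.0, "total": 4.0},
+    #             }
+    #         }
+    #     )
+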
+    @query_params("rest_total_hits_as_int", "scroll")
+    async def scroll(self, body=None, scroll_id=None, params=None, headers=None):
+        """
+        Allows retrieving large numbers of results from a single search request.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/search-request-body.html#request-body-search-scroll>`_
+
+        :arg body: The scroll ID if not passed by URL or query
+            parameter.
+        :arg scroll_id: The scroll ID for scrolled search
+        :arg rest_total_hits_as_int: Indicates whether hits.total should
+            be rendered as an integer or an object in the rest search response
+        :arg scroll: Specify how long a consistent view of the index
+            should be maintained for scrolled search
+        """
+        if scroll_id in SKIP_IN_PATH and body in SKIP_IN_PATH:
+            raise ValueError("You need to supply scroll_id or body.")
+        elif scroll_id and not body:
+            body = {"scroll_id": scroll_id}
+        elif scroll_id:
+            params["scroll_id"] = scroll_id
+
+        return await self.transport.perform_request(
+            "POST", "/_search/scroll", params=params, headers=headers, body=body
+        )
+
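+    # Illustrative usage sketch (hypothetical `es` client and index): continue a
+    # scrolled search started with `search(..., scroll=...)` by passing the scroll
+    # ID in the body, as handled above:
+    #
+    #     page = await es.search(index="my-index", scroll="2m", size=1000)
+    #     sid = page["_scroll_id"]
+    #     page = await es.scroll(body={"scroll_id": sid}, scroll="2m")
+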
+    @query_params(
+        "_source",
+        "_source_excludes",
+        "_source_includes",
+        "allow_no_indices",
+        "allow_partial_search_results",
+        "analyze_wildcard",
+        "analyzer",
+        "batched_reduce_size",
+        "ccs_minimize_roundtrips",
+        "default_operator",
+        "df",
+        "docvalue_fields",
+        "expand_wildcards",
+        "explain",
+        "from_",
+        "ignore_throttled",
+        "ignore_unavailable",
+        "lenient",
+        "max_concurrent_shard_requests",
+        "pre_filter_shard_size",
+        "preference",
+        "q",
+        "request_cache",
+        "rest_total_hits_as_int",
+        "routing",
+        "scroll",
+        "search_type",
+        "seq_no_primary_term",
+        "size",
+        "sort",
+        "stats",
+        "stored_fields",
+        "suggest_field",
+        "suggest_mode",
+        "suggest_size",
+        "suggest_text",
+        "terminate_after",
+        "timeout",
+        "track_scores",
+        "track_total_hits",
+        "typed_keys",
+        "version",
+    )
+    async def search(
+        self, body=None, index=None, doc_type=None, params=None, headers=None
+    ):
+        """
+        Returns results matching a query.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/search-search.html>`_
+
+        :arg body: The search definition using the Query DSL
+        :arg index: A comma-separated list of index names to search; use
+            `_all` or empty string to perform the operation on all indices
+        :arg doc_type: A comma-separated list of document types to
+            search; leave empty to perform the operation on all types
+        :arg _source: True or false to return the _source field or not,
+            or a list of fields to return
+        :arg _source_excludes: A list of fields to exclude from the
+            returned _source field
+        :arg _source_includes: A list of fields to extract and return
+            from the _source field
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg allow_partial_search_results: Indicate if an error should
+            be returned if there is a partial search failure or timeout  Default:
+            True
+        :arg analyze_wildcard: Specify whether wildcard and prefix
+            queries should be analyzed (default: false)
+        :arg analyzer: The analyzer to use for the query string
+        :arg batched_reduce_size: The number of shard results that
+            should be reduced at once on the coordinating node. This value should be
+            used as a protection mechanism to reduce the memory overhead per search
+            request if the potential number of shards in the request can be large.
+            Default: 512
+        :arg ccs_minimize_roundtrips: Indicates whether network round-
+            trips should be minimized as part of cross-cluster search requests
+            execution  Default: true
+        :arg default_operator: The default operator for query string
+            query (AND or OR)  Valid choices: AND, OR  Default: OR
+        :arg df: The field to use as default where no field prefix is
+            given in the query string
+        :arg docvalue_fields: A comma-separated list of fields to return
+            as the docvalue representation of a field for each hit
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg explain: Specify whether to return detailed information
+            about score computation as part of a hit
+        :arg from_: Starting offset (default: 0)
+        :arg ignore_throttled: Whether specified concrete, expanded or
+            aliased indices should be ignored when throttled
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg lenient: Specify whether format-based query failures (such
+            as providing text to a numeric field) should be ignored
+        :arg max_concurrent_shard_requests: The number of concurrent
+            shard requests per node this search executes concurrently. This value
+            should be used to limit the impact of the search on the cluster in order
+            to limit the number of concurrent shard requests  Default: 5
+        :arg pre_filter_shard_size: A threshold that enforces a pre-
+            filter roundtrip to prefilter search shards based on query rewriting if
+            the number of shards the search request expands to exceeds the
+            threshold. This filter roundtrip can limit the number of shards
+            significantly if, for instance, a shard cannot match any documents based
+            on its rewrite method, i.e. if date filters are mandatory to match but
+            the shard bounds and the query are disjoint.
+        :arg preference: Specify the node or shard the operation should
+            be performed on (default: random)
+        :arg q: Query in the Lucene query string syntax
+        :arg request_cache: Specify if request cache should be used for
+            this request or not, defaults to index level setting
+        :arg rest_total_hits_as_int: Indicates whether hits.total should
+            be rendered as an integer or an object in the rest search response
+        :arg routing: A comma-separated list of specific routing values
+        :arg scroll: Specify how long a consistent view of the index
+            should be maintained for scrolled search
+        :arg search_type: Search operation type  Valid choices:
+            query_then_fetch, dfs_query_then_fetch
+        :arg seq_no_primary_term: Specify whether to return sequence
+            number and primary term of the last modification of each hit
+        :arg size: Number of hits to return (default: 10)
+        :arg sort: A comma-separated list of <field>:<direction> pairs
+        :arg stats: Specific 'tag' of the request for logging and
+            statistical purposes
+        :arg stored_fields: A comma-separated list of stored fields to
+            return as part of a hit
+        :arg suggest_field: Specify which field to use for suggestions
+        :arg suggest_mode: Specify suggest mode  Valid choices: missing,
+            popular, always  Default: missing
+        :arg suggest_size: How many suggestions to return in response
+        :arg suggest_text: The source text for which the suggestions
+            should be returned
+        :arg terminate_after: The maximum number of documents to collect
+            for each shard, upon reaching which the query execution will terminate
+            early.
+        :arg timeout: Explicit operation timeout
+        :arg track_scores: Whether to calculate and return scores even
+            if they are not used for sorting
+        :arg track_total_hits: Indicate if the number of documents that
+            match the query should be tracked
+        :arg typed_keys: Specify whether aggregation and suggester names
+            should be prefixed by their respective types in the response
+        :arg version: Specify whether to return document version as part
+            of a hit
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path(index, doc_type, "_search"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
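+    # Illustrative usage sketch (hypothetical `es` client and index; note that
+    # `from_` is translated to the `from` query parameter above):
+    #
+    #     resp = await es.search(
+    #         index="my-index",
+    #         body={"query": {"match": {"title": "python"}}},
+    #         size=20,
+    #         from_=0,
+    #     )
+    #     hits = resp["hits"]["hits"]
+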
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "ignore_unavailable",
+        "local",
+        "preference",
+        "routing",
+    )
+    async def search_shards(self, index=None, params=None, headers=None):
+        """
+        Returns information about the indices and shards that a search request would be
+        executed against.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/search-shards.html>`_
+
+        :arg index: A comma-separated list of index names to search; use
+            `_all` or empty string to perform the operation on all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg preference: Specify the node or shard the operation should
+            be performed on (default: random)
+        :arg routing: Specific routing value
+        """
+        return await self.transport.perform_request(
+            "GET", _make_path(index, "_search_shards"), params=params, headers=headers
+        )
+
+    @query_params(
+        "_source",
+        "_source_excludes",
+        "_source_includes",
+        "if_primary_term",
+        "if_seq_no",
+        "lang",
+        "refresh",
+        "retry_on_conflict",
+        "routing",
+        "timeout",
+        "wait_for_active_shards",
+    )
+    async def update(self, index, id, body, doc_type=None, params=None, headers=None):
+        """
+        Updates a document with a script or partial document.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-update.html>`_
+
+        :arg index: The name of the index
+        :arg id: Document ID
+        :arg body: The request definition requires either `script` or
+            partial `doc`
+        :arg doc_type: The type of the document
+        :arg _source: True or false to return the _source field or not,
+            or a list of fields to return
+        :arg _source_excludes: A list of fields to exclude from the
+            returned _source field
+        :arg _source_includes: A list of fields to extract and return
+            from the _source field
+        :arg if_primary_term: only perform the update operation if the
+            last operation that has changed the document has the specified primary
+            term
+        :arg if_seq_no: only perform the update operation if the last
+            operation that has changed the document has the specified sequence
+            number
+        :arg lang: The script language (default: painless)
+        :arg refresh: If `true` then refresh the affected shards to make
+            this operation visible to search, if `wait_for` then wait for a refresh
+            to make this operation visible to search, if `false` (the default) then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        :arg retry_on_conflict: Specify how many times should the
+            operation be retried when a conflict occurs (default: 0)
+        :arg routing: Specific routing value
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_active_shards: Sets the number of shard copies
+            that must be active before proceeding with the update operation.
+            Defaults to 1, meaning the primary shard only. Set to `all` for all
+            shard copies, otherwise set to any non-negative value less than or equal
+            to the total number of copies for the shard (number of replicas + 1)
+        """
+        for param in (index, id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        if doc_type in SKIP_IN_PATH:
+            path = _make_path(index, "_update", id)
+        else:
+            path = _make_path(index, doc_type, id, "_update")
+
+        return await self.transport.perform_request(
+            "POST", path, params=params, headers=headers, body=body
+        )
+
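+    # Illustrative usage sketch (hypothetical `es` client, index and id; the body
+    # carries either a partial `doc` or a `script`, per the docstring above):
+    #
+    #     await es.update(
+    #         index="my-index",
+    #         id="1",
+    #         body={"doc": {"status": "published"}},
+    #         refresh="wait_for",
+    #     )
+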
+    @query_params("requests_per_second")
+    async def update_by_query_rethrottle(self, task_id, params=None, headers=None):
+        """
+        Changes the number of requests per second for a particular Update By Query
+        operation.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-update-by-query.html>`_
+
+        :arg task_id: The task id to rethrottle
+        :arg requests_per_second: The throttle to set on this request in
+            sub-requests per second, as a floating point number. -1 means no
+            throttle.
+        """
+        if task_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'task_id'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_update_by_query", task_id, "_rethrottle"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    async def get_script_context(self, params=None, headers=None):
+        """
+        Returns all script contexts.
+        `<https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-contexts.html>`_
+        """
+        return await self.transport.perform_request(
+            "GET", "/_script_context", params=params, headers=headers
+        )
+
+    @query_params()
+    async def get_script_languages(self, params=None, headers=None):
+        """
+        Returns available script types, languages and contexts
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/modules-scripting.html>`_
+        """
+        return await self.transport.perform_request(
+            "GET", "/_script_language", params=params, headers=headers
+        )
+
+    @query_params(
+        "ccs_minimize_roundtrips",
+        "max_concurrent_searches",
+        "rest_total_hits_as_int",
+        "search_type",
+        "typed_keys",
+    )
+    async def msearch_template(
+        self, body, index=None, doc_type=None, params=None, headers=None
+    ):
+        """
+        Allows to execute several search template operations in one request.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/search-multi-search.html>`_
+
+        :arg body: The request definitions (metadata-search request
+            definition pairs), separated by newlines
+        :arg index: A comma-separated list of index names to use as
+            default
+        :arg doc_type: A comma-separated list of document types to use
+            as default
+        :arg ccs_minimize_roundtrips: Indicates whether network round-
+            trips should be minimized as part of cross-cluster search requests
+            execution  Default: true
+        :arg max_concurrent_searches: Controls the maximum number of
+            concurrent searches the multi search api will execute
+        :arg rest_total_hits_as_int: Indicates whether hits.total should
+            be rendered as an integer or an object in the rest search response
+        :arg search_type: Search operation type  Valid choices:
+            query_then_fetch, query_and_fetch, dfs_query_then_fetch,
+            dfs_query_and_fetch
+        :arg typed_keys: Specify whether aggregation and suggester names
+            should be prefixed by their respective types in the response
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        body = _bulk_body(self.transport.serializer, body)
+        return await self.transport.perform_request(
+            "POST",
+            _make_path(index, doc_type, "_msearch", "template"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
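+    # Illustrative usage sketch (hypothetical `es` client, index and stored
+    # template id; the body pairs a header line with a template reference plus
+    # its params):
+    #
+    #     resp = await es.msearch_template(
+    #         body=[
+    #             {"index": "my-index"},
+    #             {"id": "my-template", "params": {"query_string": "python"}},
+    #         ],
+    #     )
+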
+    @query_params(
+        "field_statistics",
+        "fields",
+        "ids",
+        "offsets",
+        "payloads",
+        "positions",
+        "preference",
+        "realtime",
+        "routing",
+        "term_statistics",
+        "version",
+        "version_type",
+    )
+    async def mtermvectors(
+        self, body=None, index=None, doc_type=None, params=None, headers=None
+    ):
+        """
+        Returns multiple termvectors in one request.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-multi-termvectors.html>`_
+
+        :arg body: Define ids, documents, parameters or a list of
+            parameters per document here. You must at least provide a list of
+            document ids. See documentation.
+        :arg index: The index in which the document resides.
+        :arg doc_type: The type of the document.
+        :arg field_statistics: Specifies if document count, sum of
+            document frequencies and sum of total term frequencies should be
+            returned. Applies to all returned documents unless otherwise specified
+            in body "params" or "docs".  Default: True
+        :arg fields: A comma-separated list of fields to return. Applies
+            to all returned documents unless otherwise specified in body "params" or
+            "docs".
+        :arg ids: A comma-separated list of document ids. You must
+            define ids as a parameter or set "ids" or "docs" in the request body
+        :arg offsets: Specifies if term offsets should be returned.
+            Applies to all returned documents unless otherwise specified in body
+            "params" or "docs".  Default: True
+        :arg payloads: Specifies if term payloads should be returned.
+            Applies to all returned documents unless otherwise specified in body
+            "params" or "docs".  Default: True
+        :arg positions: Specifies if term positions should be returned.
+            Applies to all returned documents unless otherwise specified in body
+            "params" or "docs".  Default: True
+        :arg preference: Specify the node or shard the operation should
+            be performed on (default: random). Applies to all returned documents
+            unless otherwise specified in body "params" or "docs".
+        :arg realtime: Specifies if requests are real-time as opposed to
+            near-real-time (default: true).
+        :arg routing: Specific routing value. Applies to all returned
+            documents unless otherwise specified in body "params" or "docs".
+        :arg term_statistics: Specifies if total term frequency and
+            document frequency should be returned. Applies to all returned documents
+            unless otherwise specified in body "params" or "docs".
+        :arg version: Explicit version number for concurrency control
+        :arg version_type: Specific version type  Valid choices:
+            internal, external, external_gte, force
+        """
+        if doc_type in SKIP_IN_PATH:
+            path = _make_path(index, "_mtermvectors")
+        else:
+            path = _make_path(index, doc_type, "_mtermvectors")
+
+        return await self.transport.perform_request(
+            "POST", path, params=params, headers=headers, body=body
+        )
+
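+    # Illustrative usage sketch (hypothetical `es` client and index; ids are
+    # given in the body with shared "parameters", as the docstring above allows):
+    #
+    #     resp = await es.mtermvectors(
+    #         index="my-index",
+    #         body={"ids": ["1", "2"], "parameters": {"fields": ["title"]}},
+    #     )
+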
+    @query_params(
+        "allow_no_indices",
+        "ccs_minimize_roundtrips",
+        "expand_wildcards",
+        "explain",
+        "ignore_throttled",
+        "ignore_unavailable",
+        "preference",
+        "profile",
+        "rest_total_hits_as_int",
+        "routing",
+        "scroll",
+        "search_type",
+        "typed_keys",
+    )
+    async def search_template(
+        self, body, index=None, doc_type=None, params=None, headers=None
+    ):
+        """
+        Allows to use the Mustache language to pre-render a search definition.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/search-template.html>`_
+
+        :arg body: The search definition template and its params
+        :arg index: A comma-separated list of index names to search; use
+            `_all` or empty string to perform the operation on all indices
+        :arg doc_type: A comma-separated list of document types to
+            search; leave empty to perform the operation on all types
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg ccs_minimize_roundtrips: Indicates whether network round-
+            trips should be minimized as part of cross-cluster search requests
+            execution  Default: true
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg explain: Specify whether to return detailed information
+            about score computation as part of a hit
+        :arg ignore_throttled: Whether specified concrete, expanded or
+            aliased indices should be ignored when throttled
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg preference: Specify the node or shard the operation should
+            be performed on (default: random)
+        :arg profile: Specify whether to profile the query execution
+        :arg rest_total_hits_as_int: Indicates whether hits.total should
+            be rendered as an integer or an object in the rest search response
+        :arg routing: A comma-separated list of specific routing values
+        :arg scroll: Specify how long a consistent view of the index
+            should be maintained for scrolled search
+        :arg search_type: Search operation type  Valid choices:
+            query_then_fetch, query_and_fetch, dfs_query_then_fetch,
+            dfs_query_and_fetch
+        :arg typed_keys: Specify whether aggregation and suggester names
+            should be prefixed by their respective types in the response
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path(index, doc_type, "_search", "template"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
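+    # Illustrative usage sketch (hypothetical `es` client, index and stored
+    # template id):
+    #
+    #     resp = await es.search_template(
+    #         index="my-index",
+    #         body={"id": "my-template", "params": {"query_string": "python"}},
+    #     )
+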
+    @query_params(
+        "field_statistics",
+        "fields",
+        "offsets",
+        "payloads",
+        "positions",
+        "preference",
+        "realtime",
+        "routing",
+        "term_statistics",
+        "version",
+        "version_type",
+    )
+    async def termvectors(
+        self, index, body=None, doc_type=None, id=None, params=None, headers=None
+    ):
+        """
+        Returns information and statistics about terms in the fields of a particular
+        document.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-termvectors.html>`_
+
+        :arg index: The index in which the document resides.
+        :arg body: Define parameters and/or supply a document to get
+            termvectors for. See documentation.
+        :arg doc_type: The type of the document.
+        :arg id: The id of the document, when not specified a doc param
+            should be supplied.
+        :arg field_statistics: Specifies if document count, sum of
+            document frequencies and sum of total term frequencies should be
+            returned.  Default: True
+        :arg fields: A comma-separated list of fields to return.
+        :arg offsets: Specifies if term offsets should be returned.
+            Default: True
+        :arg payloads: Specifies if term payloads should be returned.
+            Default: True
+        :arg positions: Specifies if term positions should be returned.
+            Default: True
+        :arg preference: Specify the node or shard the operation should
+            be performed on (default: random).
+        :arg realtime: Specifies if request is real-time as opposed to
+            near-real-time (default: true).
+        :arg routing: Specific routing value.
+        :arg term_statistics: Specifies if total term frequency and
+            document frequency should be returned.
+        :arg version: Explicit version number for concurrency control
+        :arg version_type: Specific version type  Valid choices:
+            internal, external, external_gte, force
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        if doc_type in SKIP_IN_PATH:
+            path = _make_path(index, "_termvectors", id)
+        else:
+            path = _make_path(index, doc_type, id, "_termvectors")
+
+        return await self.transport.perform_request(
+            "POST", path, params=params, headers=headers, body=body
+        )
+
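+    # Illustrative usage sketch (hypothetical `es` client, index and document id;
+    # `fields` and `term_statistics` are query parameters declared above):
+    #
+    #     resp = await es.termvectors(
+    #         index="my-index",
+    #         id="1",
+    #         fields="title",
+    #         term_statistics=True,
+    #     )
+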
+    @query_params(
+        "_source",
+        "_source_excludes",
+        "_source_includes",
+        "allow_no_indices",
+        "analyze_wildcard",
+        "analyzer",
+        "conflicts",
+        "default_operator",
+        "df",
+        "expand_wildcards",
+        "from_",
+        "ignore_unavailable",
+        "lenient",
+        "max_docs",
+        "pipeline",
+        "preference",
+        "q",
+        "refresh",
+        "request_cache",
+        "requests_per_second",
+        "routing",
+        "scroll",
+        "scroll_size",
+        "search_timeout",
+        "search_type",
+        "size",
+        "slices",
+        "sort",
+        "stats",
+        "terminate_after",
+        "timeout",
+        "version",
+        "version_type",
+        "wait_for_active_shards",
+        "wait_for_completion",
+    )
+    async def update_by_query(
+        self, index, body=None, doc_type=None, params=None, headers=None
+    ):
+        """
+        Performs an update on every document in the index without changing the source,
+        for example to pick up a mapping change.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-update-by-query.html>`_
+
+        :arg index: A comma-separated list of index names to search; use
+            `_all` or empty string to perform the operation on all indices
+        :arg body: The search definition using the Query DSL
+        :arg doc_type: A comma-separated list of document types to
+            search; leave empty to perform the operation on all types
+        :arg _source: True or false to return the _source field or not,
+            or a list of fields to return
+        :arg _source_excludes: A list of fields to exclude from the
+            returned _source field
+        :arg _source_includes: A list of fields to extract and return
+            from the _source field
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg analyze_wildcard: Specify whether wildcard and prefix
+            queries should be analyzed (default: false)
+        :arg analyzer: The analyzer to use for the query string
+        :arg conflicts: What to do when the update by query hits version
+            conflicts?  Valid choices: abort, proceed  Default: abort
+        :arg default_operator: The default operator for query string
+            query (AND or OR)  Valid choices: AND, OR  Default: OR
+        :arg df: The field to use as default where no field prefix is
+            given in the query string
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg from_: Starting offset (default: 0)
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg lenient: Specify whether format-based query failures (such
+            as providing text to a numeric field) should be ignored
+        :arg max_docs: Maximum number of documents to process (default:
+            all documents)
+        :arg pipeline: Ingest pipeline to set on index requests made by
+            this action. (default: none)
+        :arg preference: Specify the node or shard the operation should
+            be performed on (default: random)
+        :arg q: Query in the Lucene query string syntax
+        :arg refresh: Should the affected indexes be refreshed?
+        :arg request_cache: Specify if request cache should be used for
+            this request or not, defaults to index level setting
+        :arg requests_per_second: The throttle to set on this request in
+            sub-requests per second. -1 means no throttle.
+        :arg routing: A comma-separated list of specific routing values
+        :arg scroll: Specify how long a consistent view of the index
+            should be maintained for scrolled search
+        :arg scroll_size: Size on the scroll request powering the update
+            by query  Default: 100
+        :arg search_timeout: Explicit timeout for each search request.
+            Defaults to no timeout.
+        :arg search_type: Search operation type  Valid choices:
+            query_then_fetch, dfs_query_then_fetch
+        :arg size: Deprecated, please use `max_docs` instead
+        :arg slices: The number of slices this task should be divided
+            into. Defaults to 1, meaning the task isn't sliced into subtasks. Can be
+            set to `auto`.  Default: 1
+        :arg sort: A comma-separated list of <field>:<direction> pairs
+        :arg stats: Specific 'tag' of the request for logging and
+            statistical purposes
+        :arg terminate_after: The maximum number of documents to collect
+            for each shard, upon reaching which the query execution will terminate
+            early.
+        :arg timeout: Time each individual bulk request should wait for
+            shards that are unavailable.  Default: 1m
+        :arg version: Specify whether to return document version as part
+            of a hit
+        :arg version_type: Should the document increment the version
+            number (internal) on hit or not (reindex)
+        :arg wait_for_active_shards: Sets the number of shard copies
+            that must be active before proceeding with the update by query
+            operation. Defaults to 1, meaning the primary shard only. Set to `all`
+            for all shard copies, otherwise set to any non-negative value less than
+            or equal to the total number of copies for the shard (number of replicas
+            + 1)
+        :arg wait_for_completion: Whether the request should block until
+            the update by query operation is complete.  Default: True
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path(index, doc_type, "_update_by_query"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
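+
+    # Illustrative usage sketch (hypothetical `es` client and index; a common
+    # update-by-query body combines a query with a painless script, an assumption
+    # drawn from the update-by-query docs rather than the docstring above):
+    #
+    #     await es.update_by_query(
+    #         index="my-index",
+    #         body={
+    #             "query": {"term": {"status": "draft"}},
+    #             "script": {"source": "ctx._source.status = 'published'"},
+    #         },
+    #         conflicts="proceed",
+    #     )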
diff --git a/elasticsearch_7/_async/client/__pycache__/__init__.cpython-38.pyc b/elasticsearch_7/_async/client/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..be6e1ae905d0297518407802c0d5e35e0cf645dd
Binary files /dev/null and b/elasticsearch_7/_async/client/__pycache__/__init__.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/__pycache__/async_search.cpython-38.pyc b/elasticsearch_7/_async/client/__pycache__/async_search.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e3b50941422bb674d41c1372bc7effebe6acb65b
Binary files /dev/null and b/elasticsearch_7/_async/client/__pycache__/async_search.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/__pycache__/autoscaling.cpython-38.pyc b/elasticsearch_7/_async/client/__pycache__/autoscaling.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..22de3681a284156314f032db9bd3477a661c579c
Binary files /dev/null and b/elasticsearch_7/_async/client/__pycache__/autoscaling.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/__pycache__/cat.cpython-38.pyc b/elasticsearch_7/_async/client/__pycache__/cat.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..87c40203846d252b03785687a2f3e248716b6bc2
Binary files /dev/null and b/elasticsearch_7/_async/client/__pycache__/cat.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/__pycache__/ccr.cpython-38.pyc b/elasticsearch_7/_async/client/__pycache__/ccr.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..afb85b6f166dce07cb7ea21d1237dc71cc3a5e94
Binary files /dev/null and b/elasticsearch_7/_async/client/__pycache__/ccr.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/__pycache__/cluster.cpython-38.pyc b/elasticsearch_7/_async/client/__pycache__/cluster.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..17489dc5923f55cf28854803f01582255e021b92
Binary files /dev/null and b/elasticsearch_7/_async/client/__pycache__/cluster.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/__pycache__/data_frame.cpython-38.pyc b/elasticsearch_7/_async/client/__pycache__/data_frame.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3181afb43a832b09dfbe8bcad3ebb89c35b68be6
Binary files /dev/null and b/elasticsearch_7/_async/client/__pycache__/data_frame.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/__pycache__/deprecation.cpython-38.pyc b/elasticsearch_7/_async/client/__pycache__/deprecation.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..50a806eb6c837faa51644424af62cf999f4cf9aa
Binary files /dev/null and b/elasticsearch_7/_async/client/__pycache__/deprecation.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/__pycache__/enrich.cpython-38.pyc b/elasticsearch_7/_async/client/__pycache__/enrich.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..66077a4c6f3fc65ae22419c6896b275ad0aeb247
Binary files /dev/null and b/elasticsearch_7/_async/client/__pycache__/enrich.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/__pycache__/eql.cpython-38.pyc b/elasticsearch_7/_async/client/__pycache__/eql.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..10defd8d71892fc261cedaa1830fa585b03d531f
Binary files /dev/null and b/elasticsearch_7/_async/client/__pycache__/eql.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/__pycache__/graph.cpython-38.pyc b/elasticsearch_7/_async/client/__pycache__/graph.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ee77591e28d99808a4764e87d7deb9c8c1fd331c
Binary files /dev/null and b/elasticsearch_7/_async/client/__pycache__/graph.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/__pycache__/ilm.cpython-38.pyc b/elasticsearch_7/_async/client/__pycache__/ilm.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ed637241ac95f43eb34a9fca47cdc9c459019abc
Binary files /dev/null and b/elasticsearch_7/_async/client/__pycache__/ilm.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/__pycache__/indices.cpython-38.pyc b/elasticsearch_7/_async/client/__pycache__/indices.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1d8c4bc3e1ccca7a069b9e26a785aa7ea04786e5
Binary files /dev/null and b/elasticsearch_7/_async/client/__pycache__/indices.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/__pycache__/ingest.cpython-38.pyc b/elasticsearch_7/_async/client/__pycache__/ingest.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ef099b6142011e9820f0b6890bf087298b16147a
Binary files /dev/null and b/elasticsearch_7/_async/client/__pycache__/ingest.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/__pycache__/license.cpython-38.pyc b/elasticsearch_7/_async/client/__pycache__/license.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..38f3805c91211e18a856c62c32ceb7a3fc7a4f9b
Binary files /dev/null and b/elasticsearch_7/_async/client/__pycache__/license.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/__pycache__/migration.cpython-38.pyc b/elasticsearch_7/_async/client/__pycache__/migration.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..11e437152b14dbf4e31637bdb2c3fd8b3ebc86da
Binary files /dev/null and b/elasticsearch_7/_async/client/__pycache__/migration.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/__pycache__/ml.cpython-38.pyc b/elasticsearch_7/_async/client/__pycache__/ml.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f50f61c225d9f92f70ba30410ff5e73f99add4d5
Binary files /dev/null and b/elasticsearch_7/_async/client/__pycache__/ml.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/__pycache__/monitoring.cpython-38.pyc b/elasticsearch_7/_async/client/__pycache__/monitoring.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..21553846b0b212e395cebd7f8d16c9ffea4b0fb6
Binary files /dev/null and b/elasticsearch_7/_async/client/__pycache__/monitoring.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/__pycache__/nodes.cpython-38.pyc b/elasticsearch_7/_async/client/__pycache__/nodes.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3565d03b1fbd29a196d4acbbfc5c5d68e8bbbeb8
Binary files /dev/null and b/elasticsearch_7/_async/client/__pycache__/nodes.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/__pycache__/remote.cpython-38.pyc b/elasticsearch_7/_async/client/__pycache__/remote.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..faebfee7912d1692327df17ec497bd729b5c32fc
Binary files /dev/null and b/elasticsearch_7/_async/client/__pycache__/remote.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/__pycache__/rollup.cpython-38.pyc b/elasticsearch_7/_async/client/__pycache__/rollup.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d28e011457d220c1385cdc80b6c4cfacc7ee07ad
Binary files /dev/null and b/elasticsearch_7/_async/client/__pycache__/rollup.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/__pycache__/searchable_snapshots.cpython-38.pyc b/elasticsearch_7/_async/client/__pycache__/searchable_snapshots.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e040815933cb50c161a7662727fc40c40f3a72df
Binary files /dev/null and b/elasticsearch_7/_async/client/__pycache__/searchable_snapshots.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/__pycache__/security.cpython-38.pyc b/elasticsearch_7/_async/client/__pycache__/security.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..29c4336c726885ed9609efdf473c61286358a522
Binary files /dev/null and b/elasticsearch_7/_async/client/__pycache__/security.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/__pycache__/slm.cpython-38.pyc b/elasticsearch_7/_async/client/__pycache__/slm.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bd36d6f1eaaf9aa053c8f58a740a8fcb38108f96
Binary files /dev/null and b/elasticsearch_7/_async/client/__pycache__/slm.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/__pycache__/snapshot.cpython-38.pyc b/elasticsearch_7/_async/client/__pycache__/snapshot.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c169869a7603cac5dbc04cd656d6308359fc41c4
Binary files /dev/null and b/elasticsearch_7/_async/client/__pycache__/snapshot.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/__pycache__/sql.cpython-38.pyc b/elasticsearch_7/_async/client/__pycache__/sql.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2a14da49b69c3348dc19fefb7a4ce7a3ee0d37e7
Binary files /dev/null and b/elasticsearch_7/_async/client/__pycache__/sql.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/__pycache__/ssl.cpython-38.pyc b/elasticsearch_7/_async/client/__pycache__/ssl.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..88ebb5369048052a150ad4c3cf742d23a87f665c
Binary files /dev/null and b/elasticsearch_7/_async/client/__pycache__/ssl.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/__pycache__/tasks.cpython-38.pyc b/elasticsearch_7/_async/client/__pycache__/tasks.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0a5358ebda72e71b3ecb65f055b146b1329d646d
Binary files /dev/null and b/elasticsearch_7/_async/client/__pycache__/tasks.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/__pycache__/transform.cpython-38.pyc b/elasticsearch_7/_async/client/__pycache__/transform.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..280b1d5ac2d75f13fd18b6e5cb6b44d8b3c34231
Binary files /dev/null and b/elasticsearch_7/_async/client/__pycache__/transform.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/__pycache__/utils.cpython-38.pyc b/elasticsearch_7/_async/client/__pycache__/utils.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bd39dcf34041076978755fcb0c141f636b4a366f
Binary files /dev/null and b/elasticsearch_7/_async/client/__pycache__/utils.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/__pycache__/watcher.cpython-38.pyc b/elasticsearch_7/_async/client/__pycache__/watcher.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..64cbf5bdc3fcd9d9cd21ea493352fb08d6367ee7
Binary files /dev/null and b/elasticsearch_7/_async/client/__pycache__/watcher.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/__pycache__/xpack.cpython-38.pyc b/elasticsearch_7/_async/client/__pycache__/xpack.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1dd3d04991ac58c9ba501f62bb30caadbbe554be
Binary files /dev/null and b/elasticsearch_7/_async/client/__pycache__/xpack.cpython-38.pyc differ
diff --git a/elasticsearch_7/_async/client/async_search.py b/elasticsearch_7/_async/client/async_search.py
new file mode 100644
index 0000000000000000000000000000000000000000..d1adb2eb23d712cb92494d0a1c28a736dce899de
--- /dev/null
+++ b/elasticsearch_7/_async/client/async_search.py
@@ -0,0 +1,191 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, SKIP_IN_PATH, query_params, _make_path
+
+
+class AsyncSearchClient(NamespacedClient):
+    @query_params()
+    async def delete(self, id, params=None, headers=None):
+        """
+        Deletes an async search by ID. If the search is still running, the search
+        request will be cancelled. Otherwise, the saved search results are deleted.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/async-search.html>`_
+
+        :arg id: The async search ID
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'id'.")
+
+        return await self.transport.perform_request(
+            "DELETE", _make_path("_async_search", id), params=params, headers=headers
+        )
+
+    @query_params("keep_alive", "typed_keys", "wait_for_completion_timeout")
+    async def get(self, id, params=None, headers=None):
+        """
+        Retrieves the results of a previously submitted async search request given its
+        ID.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/async-search.html>`_
+
+        :arg id: The async search ID
+        :arg keep_alive: Specify the time interval in which the results
+            (partial or final) for this search will be available
+        :arg typed_keys: Specify whether aggregation and suggester names
+            should be prefixed by their respective types in the response
+        :arg wait_for_completion_timeout: Specify the time that the
+            request should block waiting for the final response
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'id'.")
+
+        return await self.transport.perform_request(
+            "GET", _make_path("_async_search", id), params=params, headers=headers
+        )
+
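+    # Illustrative usage sketch (hypothetical `es` client; `search_id` would come
+    # from the response of a previous `submit` call):
+    #
+    #     resp = await es.async_search.get(id=search_id, keep_alive="5m")
+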
+    @query_params(
+        "_source",
+        "_source_excludes",
+        "_source_includes",
+        "allow_no_indices",
+        "allow_partial_search_results",
+        "analyze_wildcard",
+        "analyzer",
+        "batched_reduce_size",
+        "default_operator",
+        "df",
+        "docvalue_fields",
+        "expand_wildcards",
+        "explain",
+        "from_",
+        "ignore_throttled",
+        "ignore_unavailable",
+        "keep_alive",
+        "keep_on_completion",
+        "lenient",
+        "max_concurrent_shard_requests",
+        "preference",
+        "q",
+        "request_cache",
+        "routing",
+        "search_type",
+        "seq_no_primary_term",
+        "size",
+        "sort",
+        "stats",
+        "stored_fields",
+        "suggest_field",
+        "suggest_mode",
+        "suggest_size",
+        "suggest_text",
+        "terminate_after",
+        "timeout",
+        "track_scores",
+        "track_total_hits",
+        "typed_keys",
+        "version",
+        "wait_for_completion_timeout",
+    )
+    async def submit(self, body=None, index=None, params=None, headers=None):
+        """
+        Executes a search request asynchronously.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/async-search.html>`_
+
+        :arg body: The search definition using the Query DSL
+        :arg index: A comma-separated list of index names to search; use
+            `_all` or empty string to perform the operation on all indices
+        :arg _source: True or false to return the _source field or not,
+            or a list of fields to return
+        :arg _source_excludes: A list of fields to exclude from the
+            returned _source field
+        :arg _source_includes: A list of fields to extract and return
+            from the _source field
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg allow_partial_search_results: Indicate if an error should
+            be returned if there is a partial search failure or timeout  Default:
+            True
+        :arg analyze_wildcard: Specify whether wildcard and prefix
+            queries should be analyzed (default: false)
+        :arg analyzer: The analyzer to use for the query string
+        :arg batched_reduce_size: The number of shard results that
+            should be reduced at once on the coordinating node. This value should be
+            used as the granularity at which progress results will be made
+            available.  Default: 5
+        :arg default_operator: The default operator for query string
+            query (AND or OR)  Valid choices: AND, OR  Default: OR
+        :arg df: The field to use as default where no field prefix is
+            given in the query string
+        :arg docvalue_fields: A comma-separated list of fields to return
+            as the docvalue representation of a field for each hit
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg explain: Specify whether to return detailed information
+            about score computation as part of a hit
+        :arg from_: Starting offset (default: 0)
+        :arg ignore_throttled: Whether specified concrete, expanded or
+            aliased indices should be ignored when throttled
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg keep_alive: Update the time interval in which the results
+            (partial or final) for this search will be available  Default: 5d
+        :arg keep_on_completion: Control whether the response should be
+            stored in the cluster if it completed within the provided
+            [wait_for_completion] time (default: false)
+        :arg lenient: Specify whether format-based query failures (such
+            as providing text to a numeric field) should be ignored
+        :arg max_concurrent_shard_requests: The number of concurrent
+            shard requests per node this search executes concurrently. This value
+            should be used to limit the impact of the search on the cluster in order
+            to limit the number of concurrent shard requests  Default: 5
+        :arg preference: Specify the node or shard the operation should
+            be performed on (default: random)
+        :arg q: Query in the Lucene query string syntax
+        :arg request_cache: Specify if request cache should be used for
+            this request or not, defaults to true
+        :arg routing: A comma-separated list of specific routing values
+        :arg search_type: Search operation type  Valid choices:
+            query_then_fetch, dfs_query_then_fetch
+        :arg seq_no_primary_term: Specify whether to return sequence
+            number and primary term of the last modification of each hit
+        :arg size: Number of hits to return (default: 10)
+        :arg sort: A comma-separated list of <field>:<direction> pairs
+        :arg stats: Specific 'tag' of the request for logging and
+            statistical purposes
+        :arg stored_fields: A comma-separated list of stored fields to
+            return as part of a hit
+        :arg suggest_field: Specify which field to use for suggestions
+        :arg suggest_mode: Specify suggest mode  Valid choices: missing,
+            popular, always  Default: missing
+        :arg suggest_size: How many suggestions to return in response
+        :arg suggest_text: The source text for which the suggestions
+            should be returned
+        :arg terminate_after: The maximum number of documents to collect
+            for each shard, upon reaching which the query execution will terminate
+            early.
+        :arg timeout: Explicit operation timeout
+        :arg track_scores: Whether to calculate and return scores even
+            if they are not used for sorting
+        :arg track_total_hits: Indicate if the number of documents that
+            match the query should be tracked
+        :arg typed_keys: Specify whether aggregation and suggester names
+            should be prefixed by their respective types in the response
+        :arg version: Specify whether to return document version as part
+            of a hit
+        :arg wait_for_completion_timeout: Specify the time that the
+            request should block waiting for the final response  Default: 1s
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path(index, "_async_search"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
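# --- Illustrative usage sketch (not part of the patch above) ---
# Shows the async-search client added in this file being driven end to end.
# Host, index name, and the response fields ("id", "is_running", "response")
# are assumptions based on the Elasticsearch async search API, not this diff.
import asyncio
from elasticsearch_7 import AsyncElasticsearch

async def demo_async_search():
    es = AsyncElasticsearch(hosts=["http://localhost:9200"])
    # submit() returns either the final result or a handle ("id") to poll later.
    resp = await es.async_search.submit(
        index="my-index",
        body={"query": {"match_all": {}}},
        wait_for_completion_timeout="1s",
        keep_on_completion=True,
    )
    if resp.get("is_running"):
        # Poll the partial/final result via the GET endpoint defined above.
        resp = await es.async_search.get(resp["id"])
    print(resp["response"]["hits"]["total"])
    await es.close()

asyncio.run(demo_async_search())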
diff --git a/elasticsearch_7/_async/client/autoscaling.py b/elasticsearch_7/_async/client/autoscaling.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa69c303d25925cdd62d6b1750d89299d74c5980
--- /dev/null
+++ b/elasticsearch_7/_async/client/autoscaling.py
@@ -0,0 +1,75 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, SKIP_IN_PATH, _make_path
+
+
+class AutoscalingClient(NamespacedClient):
+    @query_params()
+    async def get_autoscaling_decision(self, params=None, headers=None):
+        """
+        Gets the current autoscaling decision based on the configured autoscaling
+        policy, indicating whether or not autoscaling is needed.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-get-autoscaling-decision.html>`_
+        """
+        return await self.transport.perform_request(
+            "GET", "/_autoscaling/decision", params=params, headers=headers
+        )
+
+    @query_params()
+    async def delete_autoscaling_policy(self, name, params=None, headers=None):
+        """
+        Deletes an autoscaling policy.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-delete-autoscaling-policy.html>`_
+
+        :arg name: the name of the autoscaling policy
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return await self.transport.perform_request(
+            "DELETE",
+            _make_path("_autoscaling", "policy", name),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    async def put_autoscaling_policy(self, name, body, params=None, headers=None):
+        """
+        Creates a new autoscaling policy.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-put-autoscaling-policy.html>`_
+
+        :arg name: the name of the autoscaling policy
+        :arg body: the specification of the autoscaling policy
+        """
+        for param in (name, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_autoscaling", "policy", name),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    async def get_autoscaling_policy(self, name, params=None, headers=None):
+        """
+        Retrieves an autoscaling policy.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-get-autoscaling-policy.html>`_
+
+        :arg name: the name of the autoscaling policy
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_autoscaling", "policy", name),
+            params=params,
+            headers=headers,
+        )
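# --- Illustrative usage sketch (not part of the patch above) ---
# A minimal round trip through the AutoscalingClient added here. The policy
# body shape is an assumption; only the method names come from this diff.
import asyncio
from elasticsearch_7 import AsyncElasticsearch

async def demo_autoscaling():
    es = AsyncElasticsearch()
    # name and body are required; empty values raise ValueError (see above).
    await es.autoscaling.put_autoscaling_policy(
        name="my-policy", body={"deciders": {}}
    )
    print(await es.autoscaling.get_autoscaling_policy(name="my-policy"))
    await es.autoscaling.delete_autoscaling_policy(name="my-policy")
    await es.close()

asyncio.run(demo_autoscaling())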
diff --git a/elasticsearch_7/_async/client/cat.py b/elasticsearch_7/_async/client/cat.py
new file mode 100644
index 0000000000000000000000000000000000000000..55af0ca5a41bb2550f32cf858c57ce2d98aa1bd6
--- /dev/null
+++ b/elasticsearch_7/_async/client/cat.py
@@ -0,0 +1,724 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path
+
+
+class CatClient(NamespacedClient):
+    @query_params("expand_wildcards", "format", "h", "help", "local", "s", "v")
+    async def aliases(self, name=None, params=None, headers=None):
+        """
+        Shows information about currently configured aliases to indices including
+        filter and routing information.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-alias.html>`_
+
+        :arg name: A comma-separated list of alias names to return
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: all
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg v: Verbose mode. Display column headers
+        """
+        return await self.transport.perform_request(
+            "GET", _make_path("_cat", "aliases", name), params=params, headers=headers
+        )
+
+    @query_params("bytes", "format", "h", "help", "local", "master_timeout", "s", "v")
+    async def allocation(self, node_id=None, params=None, headers=None):
+        """
+        Provides a snapshot of how many shards are allocated to each data node and how
+        much disk space they are using.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-allocation.html>`_
+
+        :arg node_id: A comma-separated list of node IDs or names to
+            limit the returned information
+        :arg bytes: The unit in which to display byte values  Valid
+            choices: b, k, kb, m, mb, g, gb, t, tb, p, pb
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg v: Verbose mode. Display column headers
+        """
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_cat", "allocation", node_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("format", "h", "help", "s", "v")
+    async def count(self, index=None, params=None, headers=None):
+        """
+        Provides quick access to the document count of the entire cluster, or
+        individual indices.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-count.html>`_
+
+        :arg index: A comma-separated list of index names to limit the
+            returned information
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg v: Verbose mode. Display column headers
+        """
+        return await self.transport.perform_request(
+            "GET", _make_path("_cat", "count", index), params=params, headers=headers
+        )
+
+    @query_params("format", "h", "help", "s", "time", "ts", "v")
+    async def health(self, params=None, headers=None):
+        """
+        Returns a concise representation of the cluster health.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-health.html>`_
+
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg time: The unit in which to display time values  Valid
+            choices: d, h, m, s, ms, micros, nanos
+        :arg ts: Set to false to disable timestamping  Default: True
+        :arg v: Verbose mode. Display column headers
+        """
+        return await self.transport.perform_request(
+            "GET", "/_cat/health", params=params, headers=headers
+        )
+
+    @query_params("help", "s")
+    async def help(self, params=None, headers=None):
+        """
+        Returns help for the Cat APIs.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat.html>`_
+
+        :arg help: Return help information
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        """
+        return await self.transport.perform_request(
+            "GET", "/_cat", params=params, headers=headers
+        )
+
+    @query_params(
+        "bytes",
+        "expand_wildcards",
+        "format",
+        "h",
+        "health",
+        "help",
+        "include_unloaded_segments",
+        "local",
+        "master_timeout",
+        "pri",
+        "s",
+        "time",
+        "v",
+    )
+    async def indices(self, index=None, params=None, headers=None):
+        """
+        Returns information about indices: number of primaries and replicas, document
+        counts, disk size, ...
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-indices.html>`_
+
+        :arg index: A comma-separated list of index names to limit the
+            returned information
+        :arg bytes: The unit in which to display byte values  Valid
+            choices: b, k, kb, m, mb, g, gb, t, tb, p, pb
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: all
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg health: A health status ("green", "yellow", or "red") to
+            filter only indices matching the specified health status  Valid choices:
+            green, yellow, red
+        :arg help: Return help information
+        :arg include_unloaded_segments: If set to true segment stats
+            will include stats for segments that are not currently loaded into
+            memory
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg pri: Set to true to return stats only for primary shards
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg time: The unit in which to display time values  Valid
+            choices: d, h, m, s, ms, micros, nanos
+        :arg v: Verbose mode. Display column headers
+        """
+        return await self.transport.perform_request(
+            "GET", _make_path("_cat", "indices", index), params=params, headers=headers
+        )
+
+    @query_params("format", "h", "help", "local", "master_timeout", "s", "v")
+    async def master(self, params=None, headers=None):
+        """
+        Returns information about the master node.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-master.html>`_
+
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg v: Verbose mode. Display column headers
+        """
+        return await self.transport.perform_request(
+            "GET", "/_cat/master", params=params, headers=headers
+        )
+
+    @query_params(
+        "bytes",
+        "format",
+        "full_id",
+        "h",
+        "help",
+        "local",
+        "master_timeout",
+        "s",
+        "time",
+        "v",
+    )
+    async def nodes(self, params=None, headers=None):
+        """
+        Returns basic statistics about performance of cluster nodes.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-nodes.html>`_
+
+        :arg bytes: The unit in which to display byte values  Valid
+            choices: b, k, kb, m, mb, g, gb, t, tb, p, pb
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg full_id: Return the full node ID instead of the shortened
+            version (default: false)
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg local: Calculate the selected nodes using the local cluster
+            state rather than the state from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg time: The unit in which to display time values  Valid
+            choices: d, h, m, s, ms, micros, nanos
+        :arg v: Verbose mode. Display column headers
+        """
+        return await self.transport.perform_request(
+            "GET", "/_cat/nodes", params=params, headers=headers
+        )
+
+    @query_params(
+        "active_only", "bytes", "detailed", "format", "h", "help", "s", "time", "v"
+    )
+    async def recovery(self, index=None, params=None, headers=None):
+        """
+        Returns information about index shard recoveries, both on-going and completed.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-recovery.html>`_
+
+        :arg index: Comma-separated list or wildcard expression of index
+            names to limit the returned information
+        :arg active_only: If `true`, the response only includes ongoing
+            shard recoveries
+        :arg bytes: The unit in which to display byte values  Valid
+            choices: b, k, kb, m, mb, g, gb, t, tb, p, pb
+        :arg detailed: If `true`, the response includes detailed
+            information about shard recoveries
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg time: The unit in which to display time values  Valid
+            choices: d, h, m, s, ms, micros, nanos
+        :arg v: Verbose mode. Display column headers
+        """
+        return await self.transport.perform_request(
+            "GET", _make_path("_cat", "recovery", index), params=params, headers=headers
+        )
+
+    @query_params(
+        "bytes", "format", "h", "help", "local", "master_timeout", "s", "time", "v"
+    )
+    async def shards(self, index=None, params=None, headers=None):
+        """
+        Provides a detailed view of shard allocation on nodes.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-shards.html>`_
+
+        :arg index: A comma-separated list of index names to limit the
+            returned information
+        :arg bytes: The unit in which to display byte values  Valid
+            choices: b, k, kb, m, mb, g, gb, t, tb, p, pb
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg time: The unit in which to display time values  Valid
+            choices: d, h, m, s, ms, micros, nanos
+        :arg v: Verbose mode. Display column headers
+        """
+        return await self.transport.perform_request(
+            "GET", _make_path("_cat", "shards", index), params=params, headers=headers
+        )
+
+    @query_params("bytes", "format", "h", "help", "s", "v")
+    async def segments(self, index=None, params=None, headers=None):
+        """
+        Provides low-level information about the segments in the shards of an index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-segments.html>`_
+
+        :arg index: A comma-separated list of index names to limit the
+            returned information
+        :arg bytes: The unit in which to display byte values  Valid
+            choices: b, k, kb, m, mb, g, gb, t, tb, p, pb
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg v: Verbose mode. Display column headers
+        """
+        return await self.transport.perform_request(
+            "GET", _make_path("_cat", "segments", index), params=params, headers=headers
+        )
+
+    @query_params("format", "h", "help", "local", "master_timeout", "s", "time", "v")
+    async def pending_tasks(self, params=None, headers=None):
+        """
+        Returns a concise representation of the cluster pending tasks.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-pending-tasks.html>`_
+
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg time: The unit in which to display time values  Valid
+            choices: d, h, m, s, ms, micros, nanos
+        :arg v: Verbose mode. Display column headers
+        """
+        return await self.transport.perform_request(
+            "GET", "/_cat/pending_tasks", params=params, headers=headers
+        )
+
+    @query_params("format", "h", "help", "local", "master_timeout", "s", "size", "v")
+    async def thread_pool(self, thread_pool_patterns=None, params=None, headers=None):
+        """
+        Returns cluster-wide thread pool statistics per node. By default the active,
+        queue and rejected statistics are returned for all thread pools.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-thread-pool.html>`_
+
+        :arg thread_pool_patterns: A comma-separated list of regular
+            expressions to filter the thread pools in the output
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg size: The multiplier in which to display values  Valid
+            choices: , k, m, g, t, p
+        :arg v: Verbose mode. Display column headers
+        """
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_cat", "thread_pool", thread_pool_patterns),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("bytes", "format", "h", "help", "s", "v")
+    async def fielddata(self, fields=None, params=None, headers=None):
+        """
+        Shows how much heap memory is currently being used by fielddata on every data
+        node in the cluster.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-fielddata.html>`_
+
+        :arg fields: A comma-separated list of fields to return in the
+            output
+        :arg bytes: The unit in which to display byte values  Valid
+            choices: b, k, kb, m, mb, g, gb, t, tb, p, pb
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg v: Verbose mode. Display column headers
+        """
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_cat", "fielddata", fields),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("format", "h", "help", "local", "master_timeout", "s", "v")
+    async def plugins(self, params=None, headers=None):
+        """
+        Returns information about installed plugins across nodes.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-plugins.html>`_
+
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg v: Verbose mode. Display column headers
+        """
+        return await self.transport.perform_request(
+            "GET", "/_cat/plugins", params=params, headers=headers
+        )
+
+    @query_params("format", "h", "help", "local", "master_timeout", "s", "v")
+    async def nodeattrs(self, params=None, headers=None):
+        """
+        Returns information about custom node attributes.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-nodeattrs.html>`_
+
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg v: Verbose mode. Display column headers
+        """
+        return await self.transport.perform_request(
+            "GET", "/_cat/nodeattrs", params=params, headers=headers
+        )
+
+    @query_params("format", "h", "help", "local", "master_timeout", "s", "v")
+    async def repositories(self, params=None, headers=None):
+        """
+        Returns information about snapshot repositories registered in the cluster.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-repositories.html>`_
+
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg local: Return local information, do not retrieve the state
+            from master node
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg v: Verbose mode. Display column headers
+        """
+        return await self.transport.perform_request(
+            "GET", "/_cat/repositories", params=params, headers=headers
+        )
+
+    @query_params(
+        "format", "h", "help", "ignore_unavailable", "master_timeout", "s", "time", "v"
+    )
+    async def snapshots(self, repository=None, params=None, headers=None):
+        """
+        Returns all snapshots in a specific repository.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-snapshots.html>`_
+
+        :arg repository: Name of repository from which to fetch the
+            snapshot information
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg ignore_unavailable: Set to true to ignore unavailable
+            snapshots
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg time: The unit in which to display time values  Valid
+            choices: d, h, m, s, ms, micros, nanos
+        :arg v: Verbose mode. Display column headers
+        """
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_cat", "snapshots", repository),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "actions",
+        "detailed",
+        "format",
+        "h",
+        "help",
+        "node_id",
+        "parent_task",
+        "s",
+        "time",
+        "v",
+    )
+    async def tasks(self, params=None, headers=None):
+        """
+        Returns information about the tasks currently executing on one or more nodes in
+        the cluster.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/tasks.html>`_
+
+        :arg actions: A comma-separated list of actions that should be
+            returned. Leave empty to return all.
+        :arg detailed: Return detailed task information (default: false)
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg node_id: A comma-separated list of node IDs or names to
+            limit the returned information; use `_local` to return information from
+            the node you're connecting to, leave empty to get information from all
+            nodes
+        :arg parent_task: Return tasks with specified parent task id.
+            Set to -1 to return all.
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg time: The unit in which to display time values  Valid
+            choices: d, h, m, s, ms, micros, nanos
+        :arg v: Verbose mode. Display column headers
+        """
+        return await self.transport.perform_request(
+            "GET", "/_cat/tasks", params=params, headers=headers
+        )
+
+    @query_params("format", "h", "help", "local", "master_timeout", "s", "v")
+    async def templates(self, name=None, params=None, headers=None):
+        """
+        Returns information about existing templates.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-templates.html>`_
+
+        :arg name: A pattern that returned template names must match
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg v: Verbose mode. Display column headers
+        """
+        return await self.transport.perform_request(
+            "GET", _make_path("_cat", "templates", name), params=params, headers=headers
+        )
+
+    @query_params("allow_no_match", "bytes", "format", "h", "help", "s", "time", "v")
+    async def ml_data_frame_analytics(self, id=None, params=None, headers=None):
+        """
+        Gets configuration and usage information about data frame analytics jobs.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-dfanalytics.html>`_
+
+        :arg id: The ID of the data frame analytics to fetch
+        :arg allow_no_match: Whether to ignore if a wildcard expression
+            matches no configs. (This includes `_all` string or when no configs have
+            been specified)
+        :arg bytes: The unit in which to display byte values  Valid
+            choices: b, k, kb, m, mb, g, gb, t, tb, p, pb
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg time: The unit in which to display time values  Valid
+            choices: d, h, m, s, ms, micros, nanos
+        :arg v: Verbose mode. Display column headers
+        """
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_cat", "ml", "data_frame", "analytics", id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("allow_no_datafeeds", "format", "h", "help", "s", "time", "v")
+    async def ml_datafeeds(self, datafeed_id=None, params=None, headers=None):
+        """
+        Gets configuration and usage information about datafeeds.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-datafeeds.html>`_
+
+        :arg datafeed_id: The ID of the datafeeds stats to fetch
+        :arg allow_no_datafeeds: Whether to ignore if a wildcard
+            expression matches no datafeeds. (This includes `_all` string or when no
+            datafeeds have been specified)
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg time: The unit in which to display time values  Valid
+            choices: d, h, m, s, ms, micros, nanos
+        :arg v: Verbose mode. Display column headers
+        """
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_cat", "ml", "datafeeds", datafeed_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("allow_no_jobs", "bytes", "format", "h", "help", "s", "time", "v")
+    async def ml_jobs(self, job_id=None, params=None, headers=None):
+        """
+        Gets configuration and usage information about anomaly detection jobs.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-anomaly-detectors.html>`_
+
+        :arg job_id: The ID of the jobs stats to fetch
+        :arg allow_no_jobs: Whether to ignore if a wildcard expression
+            matches no jobs. (This includes `_all` string or when no jobs have been
+            specified)
+        :arg bytes: The unit in which to display byte values  Valid
+            choices: b, k, kb, m, mb, g, gb, t, tb, p, pb
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg time: The unit in which to display time values  Valid
+            choices: d, h, m, s, ms, micros, nanos
+        :arg v: Verbose mode. Display column headers
+        """
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_cat", "ml", "anomaly_detectors", job_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "allow_no_match",
+        "bytes",
+        "format",
+        "from_",
+        "h",
+        "help",
+        "s",
+        "size",
+        "time",
+        "v",
+    )
+    async def ml_trained_models(self, model_id=None, params=None, headers=None):
+        """
+        Gets configuration and usage information about inference trained models.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-trained-model.html>`_
+
+        :arg model_id: The ID of the trained models stats to fetch
+        :arg allow_no_match: Whether to ignore if a wildcard expression
+            matches no trained models. (This includes `_all` string or when no
+            trained models have been specified)  Default: True
+        :arg bytes: The unit in which to display byte values  Valid
+            choices: b, k, kb, m, mb, g, gb, t, tb, p, pb
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg from_: skips a number of trained models
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg size: specifies a max number of trained models to get
+            Default: 100
+        :arg time: The unit in which to display time values  Valid
+            choices: d, h, m, s, ms, micros, nanos
+        :arg v: Verbose mode. Display column headers
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_cat", "ml", "trained_models", model_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "allow_no_match", "format", "from_", "h", "help", "s", "size", "time", "v"
+    )
+    async def transforms(self, transform_id=None, params=None, headers=None):
+        """
+        Gets configuration and usage information about transforms.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-transforms.html>`_
+
+        :arg transform_id: The id of the transform for which to get
+            stats. '_all' or '*' implies all transforms
+        :arg allow_no_match: Whether to ignore if a wildcard expression
+            matches no transforms. (This includes `_all` string or when no
+            transforms have been specified)
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg from_: skips a number of transform configs, defaults to 0
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg size: specifies a max number of transforms to get, defaults
+            to 100
+        :arg time: The unit in which to display time values  Valid
+            choices: d, h, m, s, ms, micros, nanos
+        :arg v: Verbose mode. Display column headers
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_cat", "transforms", transform_id),
+            params=params,
+            headers=headers,
+        )
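# --- Illustrative usage sketch (not part of the patch above) ---
# The cat APIs above return plain text by default; format="json" (documented
# on each method) yields parsed rows. Column names in the rows are assumed
# from the Elasticsearch cat-indices output, not from this diff.
import asyncio
from elasticsearch_7 import AsyncElasticsearch

async def demo_cat():
    es = AsyncElasticsearch()
    # Cluster-level one-liner with column headers.
    print(await es.cat.health(v=True))
    # Per-index rows as JSON, sorted by store size.
    rows = await es.cat.indices(format="json", s="store.size:desc")
    for row in rows:
        print(row["index"], row["health"], row["docs.count"])
    await es.close()

asyncio.run(demo_cat())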
diff --git a/elasticsearch_7/_async/client/ccr.py b/elasticsearch_7/_async/client/ccr.py
new file mode 100644
index 0000000000000000000000000000000000000000..62335060401ee3f944864ebe65ad9728b05f3c04
--- /dev/null
+++ b/elasticsearch_7/_async/client/ccr.py
@@ -0,0 +1,259 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class CcrClient(NamespacedClient):
+    @query_params()
+    async def delete_auto_follow_pattern(self, name, params=None, headers=None):
+        """
+        Deletes auto-follow patterns.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ccr-delete-auto-follow-pattern.html>`_
+
+        :arg name: The name of the auto follow pattern.
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return await self.transport.perform_request(
+            "DELETE",
+            _make_path("_ccr", "auto_follow", name),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("wait_for_active_shards")
+    async def follow(self, index, body, params=None, headers=None):
+        """
+        Creates a new follower index configured to follow the referenced leader index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ccr-put-follow.html>`_
+
+        :arg index: The name of the follower index
+        :arg body: The name of the leader index and other optional ccr
+            related parameters
+        :arg wait_for_active_shards: Sets the number of shard copies
+            that must be active before returning. Defaults to 0. Set to `all` for
+            all shard copies, otherwise set to any non-negative value less than or
+            equal to the total number of copies for the shard (number of replicas +
+            1)  Default: 0
+        """
+        for param in (index, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path(index, "_ccr", "follow"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    async def follow_info(self, index, params=None, headers=None):
+        """
+        Retrieves information about all follower indices, including parameters and
+        status for each follower index
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ccr-get-follow-info.html>`_
+
+        :arg index: A comma-separated list of index patterns; use `_all`
+            to perform the operation on all indices
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return await self.transport.perform_request(
+            "GET", _make_path(index, "_ccr", "info"), params=params, headers=headers
+        )
+
+    @query_params()
+    async def follow_stats(self, index, params=None, headers=None):
+        """
+        Retrieves follower stats. Returns shard-level stats about the following tasks
+        associated with each shard for the specified indices.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ccr-get-follow-stats.html>`_
+
+        :arg index: A comma-separated list of index patterns; use `_all`
+            to perform the operation on all indices
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return await self.transport.perform_request(
+            "GET", _make_path(index, "_ccr", "stats"), params=params, headers=headers
+        )
+
+    @query_params()
+    async def forget_follower(self, index, body, params=None, headers=None):
+        """
+        Removes the follower retention leases from the leader.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ccr-post-forget-follower.html>`_
+
+        :arg index: the name of the leader index for which specified
+            follower retention leases should be removed
+        :arg body: the name and UUID of the follower index, the name of
+            the cluster containing the follower index, and the alias from the
+            perspective of that cluster for the remote cluster containing the leader
+            index
+        """
+        for param in (index, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path(index, "_ccr", "forget_follower"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    async def get_auto_follow_pattern(self, name=None, params=None, headers=None):
+        """
+        Gets configured auto-follow patterns. Returns the specified auto-follow pattern
+        collection.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ccr-get-auto-follow-pattern.html>`_
+
+        :arg name: The name of the auto follow pattern.
+        """
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_ccr", "auto_follow", name),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    async def pause_follow(self, index, params=None, headers=None):
+        """
+        Pauses a follower index. The follower index will not fetch any additional
+        operations from the leader index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ccr-post-pause-follow.html>`_
+
+        :arg index: The name of the follower index that should pause
+            following its leader index.
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path(index, "_ccr", "pause_follow"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    async def put_auto_follow_pattern(self, name, body, params=None, headers=None):
+        """
+        Creates a new named collection of auto-follow patterns against a specified
+        remote cluster. Newly created indices on the remote cluster matching any of the
+        specified patterns will be automatically configured as follower indices.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ccr-put-auto-follow-pattern.html>`_
+
+        :arg name: The name of the auto follow pattern.
+        :arg body: The specification of the auto follow pattern
+        """
+        for param in (name, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_ccr", "auto_follow", name),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    async def resume_follow(self, index, body=None, params=None, headers=None):
+        """
+        Resumes a follower index that has been paused
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ccr-post-resume-follow.html>`_
+
+        :arg index: The name of the follow index to resume following.
+        :arg body: The name of the leader index and other optional ccr
+            related parameters
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path(index, "_ccr", "resume_follow"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    async def stats(self, params=None, headers=None):
+        """
+        Gets all stats related to cross-cluster replication.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ccr-get-stats.html>`_
+        """
+        return await self.transport.perform_request(
+            "GET", "/_ccr/stats", params=params, headers=headers
+        )
+
+    @query_params()
+    async def unfollow(self, index, params=None, headers=None):
+        """
+        Stops the following task associated with a follower index and removes index
+        metadata and settings associated with cross-cluster replication.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ccr-post-unfollow.html>`_
+
+        :arg index: The name of the follower index that should be turned
+            into a regular index.
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path(index, "_ccr", "unfollow"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    async def pause_auto_follow_pattern(self, name, params=None, headers=None):
+        """
+        Pauses an auto-follow pattern
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ccr-pause-auto-follow-pattern.html>`_
+
+        :arg name: The name of the auto follow pattern that should pause
+            discovering new indices to follow.
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_ccr", "auto_follow", name, "pause"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    async def resume_auto_follow_pattern(self, name, params=None, headers=None):
+        """
+        Resumes an auto-follow pattern that has been paused
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ccr-resume-auto-follow-pattern.html>`_
+
+        :arg name: The name of the auto follow pattern to resume
+            discovering new indices to follow.
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_ccr", "auto_follow", name, "resume"),
+            params=params,
+            headers=headers,
+        )
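# --- Illustrative usage sketch (not part of the patch above) ---
# Follows a leader index from a remote cluster via the CcrClient added here.
# The remote cluster alias, index names, and body keys ("remote_cluster",
# "leader_index") are assumptions about the CCR follow API, not this diff.
import asyncio
from elasticsearch_7 import AsyncElasticsearch

async def demo_ccr():
    es = AsyncElasticsearch()
    await es.ccr.follow(
        index="follower-index",
        body={"remote_cluster": "leader", "leader_index": "leader-index"},
    )
    # Shard-level stats for the following task, then pause replication.
    print(await es.ccr.follow_stats(index="follower-index"))
    await es.ccr.pause_follow(index="follower-index")
    await es.close()

asyncio.run(demo_ccr())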
diff --git a/elasticsearch_7/_async/client/cluster.py b/elasticsearch_7/_async/client/cluster.py
new file mode 100644
index 0000000000000000000000000000000000000000..b8afd2b3c26c7d8113770e4bc15be373d4a7507d
--- /dev/null
+++ b/elasticsearch_7/_async/client/cluster.py
@@ -0,0 +1,361 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class ClusterClient(NamespacedClient):
+    @query_params(
+        "expand_wildcards",
+        "level",
+        "local",
+        "master_timeout",
+        "timeout",
+        "wait_for_active_shards",
+        "wait_for_events",
+        "wait_for_no_initializing_shards",
+        "wait_for_no_relocating_shards",
+        "wait_for_nodes",
+        "wait_for_status",
+    )
+    async def health(self, index=None, params=None, headers=None):
+        """
+        Returns basic information about the health of the cluster.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cluster-health.html>`_
+
+        :arg index: Limit the information returned to a specific index
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: all
+        :arg level: Specify the level of detail for returned information
+            Valid choices: cluster, indices, shards  Default: cluster
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_active_shards: Wait until the specified number of
+            shards is active
+        :arg wait_for_events: Wait until all currently queued events
+            with the given priority are processed  Valid choices: immediate, urgent,
+            high, normal, low, languid
+        :arg wait_for_no_initializing_shards: Whether to wait until
+            there are no initializing shards in the cluster
+        :arg wait_for_no_relocating_shards: Whether to wait until there
+            are no relocating shards in the cluster
+        :arg wait_for_nodes: Wait until the specified number of nodes is
+            available
+        :arg wait_for_status: Wait until cluster is in a specific state
+            Valid choices: green, yellow, red
+        """
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_cluster", "health", index),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("local", "master_timeout")
+    async def pending_tasks(self, params=None, headers=None):
+        """
+        Returns a list of any cluster-level changes (e.g. create index, update mapping,
+        allocate or fail shard) which have not yet been executed.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cluster-pending.html>`_
+
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Specify timeout for connection to master
+        """
+        return await self.transport.perform_request(
+            "GET", "/_cluster/pending_tasks", params=params, headers=headers
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "flat_settings",
+        "ignore_unavailable",
+        "local",
+        "master_timeout",
+        "wait_for_metadata_version",
+        "wait_for_timeout",
+    )
+    async def state(self, metric=None, index=None, params=None, headers=None):
+        """
+        Returns comprehensive information about the state of the cluster.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cluster-state.html>`_
+
+        :arg metric: Limit the information returned to the specified
+            metrics  Valid choices: _all, blocks, metadata, nodes, routing_table,
+            routing_nodes, master_node, version
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string to perform the operation on all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg flat_settings: Return settings in flat format (default:
+            false)
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Specify timeout for connection to master
+        :arg wait_for_metadata_version: Wait for the metadata version to
+            be equal or greater than the specified metadata version
+        :arg wait_for_timeout: The maximum time to wait for
+            wait_for_metadata_version before timing out
+        """
+        if index and metric in SKIP_IN_PATH:
+            metric = "_all"
+
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_cluster", "state", metric, index),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("flat_settings", "timeout")
+    async def stats(self, node_id=None, params=None, headers=None):
+        """
+        Returns high-level overview of cluster statistics.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cluster-stats.html>`_
+
+        :arg node_id: A comma-separated list of node IDs or names to
+            limit the returned information; use `_local` to return information from
+            the node you're connecting to, leave empty to get information from all
+            nodes
+        :arg flat_settings: Return settings in flat format (default:
+            false)
+        :arg timeout: Explicit operation timeout
+        """
+        return await self.transport.perform_request(
+            "GET",
+            "/_cluster/stats"
+            if node_id in SKIP_IN_PATH
+            else _make_path("_cluster", "stats", "nodes", node_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "dry_run", "explain", "master_timeout", "metric", "retry_failed", "timeout"
+    )
+    async def reroute(self, body=None, params=None, headers=None):
+        """
+        Allows manual changes to the allocation of individual shards in the cluster.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cluster-reroute.html>`_
+
+        :arg body: The definition of `commands` to perform (`move`,
+            `cancel`, `allocate`)
+        :arg dry_run: Simulate the operation only and return the
+            resulting state
+        :arg explain: Return an explanation of why the commands can or
+            cannot be executed
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg metric: Limit the information returned to the specified
+            metrics. Defaults to all but metadata  Valid choices: _all, blocks,
+            metadata, nodes, routing_table, master_node, version
+        :arg retry_failed: Retries allocation of shards that are blocked
+            due to too many subsequent allocation failures
+        :arg timeout: Explicit operation timeout
+        """
+        return await self.transport.perform_request(
+            "POST", "/_cluster/reroute", params=params, headers=headers, body=body
+        )
+
+    @query_params("flat_settings", "include_defaults", "master_timeout", "timeout")
+    async def get_settings(self, params=None, headers=None):
+        """
+        Returns cluster settings.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cluster-update-settings.html>`_
+
+        :arg flat_settings: Return settings in flat format (default:
+            false)
+        :arg include_defaults: Whether to return all default cluster
+            settings.
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg timeout: Explicit operation timeout
+        """
+        return await self.transport.perform_request(
+            "GET", "/_cluster/settings", params=params, headers=headers
+        )
+
+    @query_params("flat_settings", "master_timeout", "timeout")
+    async def put_settings(self, body, params=None, headers=None):
+        """
+        Updates the cluster settings.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cluster-update-settings.html>`_
+
+        :arg body: The settings to be updated. Can be either `transient`
+            or `persistent` (survives cluster restart).
+        :arg flat_settings: Return settings in flat format (default:
+            false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg timeout: Explicit operation timeout
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return await self.transport.perform_request(
+            "PUT", "/_cluster/settings", params=params, headers=headers, body=body
+        )
+
+    @query_params()
+    async def remote_info(self, params=None, headers=None):
+        """
+        Returns the information about configured remote clusters.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cluster-remote-info.html>`_
+        """
+        return await self.transport.perform_request(
+            "GET", "/_remote/info", params=params, headers=headers
+        )
+
+    @query_params("include_disk_info", "include_yes_decisions")
+    async def allocation_explain(self, body=None, params=None, headers=None):
+        """
+        Provides explanations for shard allocations in the cluster.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cluster-allocation-explain.html>`_
+
+        :arg body: The index, shard, and primary flag to explain. Empty
+            means 'explain the first unassigned shard'
+        :arg include_disk_info: Return information about disk usage and
+            shard sizes (default: false)
+        :arg include_yes_decisions: Return 'YES' decisions in
+            explanation (default: false)
+        """
+        return await self.transport.perform_request(
+            "POST",
+            "/_cluster/allocation/explain",
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("master_timeout", "timeout")
+    async def delete_component_template(self, name, params=None, headers=None):
+        """
+        Deletes a component template
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-component-template.html>`_
+
+        :arg name: The name of the template
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return await self.transport.perform_request(
+            "DELETE",
+            _make_path("_component_template", name),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("local", "master_timeout")
+    async def get_component_template(self, name=None, params=None, headers=None):
+        """
+        Returns one or more component templates
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-component-template.html>`_
+
+        :arg name: The comma separated names of the component templates
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        """
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_component_template", name),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("create", "master_timeout", "timeout")
+    async def put_component_template(self, name, body, params=None, headers=None):
+        """
+        Creates or updates a component template
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-component-template.html>`_
+
+        :arg name: The name of the template
+        :arg body: The template definition
+        :arg create: Whether the index template should only be added if
+            new or can also replace an existing one
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        """
+        for param in (name, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_component_template", name),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("local", "master_timeout")
+    async def exists_component_template(self, name, params=None, headers=None):
+        """
+        Returns information about whether a particular component template exists
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-component-template.html>`_
+
+        :arg name: The name of the template
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return await self.transport.perform_request(
+            "HEAD",
+            _make_path("_component_template", name),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("wait_for_removal")
+    async def delete_voting_config_exclusions(self, params=None, headers=None):
+        """
+        Clears cluster voting config exclusions.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/voting-config-exclusions.html>`_
+
+        :arg wait_for_removal: Specifies whether to wait for all
+            excluded nodes to be removed from the cluster before clearing the voting
+            configuration exclusions list.  Default: True
+        """
+        return await self.transport.perform_request(
+            "DELETE",
+            "/_cluster/voting_config_exclusions",
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("node_ids", "node_names", "timeout")
+    async def post_voting_config_exclusions(self, params=None, headers=None):
+        """
+        Updates the cluster voting config exclusions by node ids or node names.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/voting-config-exclusions.html>`_
+
+        :arg node_ids: A comma-separated list of the persistent ids of
+            the nodes to exclude from the voting configuration. If specified, you
+            may not also specify ?node_names.
+        :arg node_names: A comma-separated list of the names of the
+            nodes to exclude from the voting configuration. If specified, you may
+            not also specify ?node_ids.
+        :arg timeout: Explicit operation timeout  Default: 30s
+        """
+        return await self.transport.perform_request(
+            "POST", "/_cluster/voting_config_exclusions", params=params, headers=headers
+        )
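For orientation, here is a minimal usage sketch of the cluster namespace added above, assuming it is exposed as `es.cluster` on `AsyncElasticsearch` (mirroring the synchronous client); the host, status, and setting values are placeholders:

```python
import asyncio

from elasticsearch_7 import AsyncElasticsearch


async def main():
    es = AsyncElasticsearch(hosts=["http://localhost:9200"])  # placeholder host
    try:
        # Block for up to 30s until the cluster reports at least yellow health.
        health = await es.cluster.health(wait_for_status="yellow", timeout="30s")
        print(health["status"])

        # Apply a transient setting; the body mirrors the REST API
        # (`transient` vs. `persistent`).
        await es.cluster.put_settings(
            body={"transient": {"cluster.routing.allocation.enable": "all"}}
        )
    finally:
        await es.close()


asyncio.run(main())  # Python 3.7+
```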
diff --git a/elasticsearch_7/_async/client/data_frame.py b/elasticsearch_7/_async/client/data_frame.py
new file mode 100644
index 0000000000000000000000000000000000000000..234079a711ff89ef7cc4f29919b534eab9274240
--- /dev/null
+++ b/elasticsearch_7/_async/client/data_frame.py
@@ -0,0 +1,142 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class Data_FrameClient(NamespacedClient):
+    @query_params()
+    async def delete_data_frame_transform(
+        self, transform_id, params=None, headers=None
+    ):
+        """
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.x/delete-data-frame-transform.html>`_
+
+        :arg transform_id: The id of the transform to delete
+        """
+        if transform_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for a required argument 'transform_id'."
+            )
+        return await self.transport.perform_request(
+            "DELETE",
+            _make_path("_data_frame", "transforms", transform_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("from_", "size")
+    async def get_data_frame_transform(
+        self, transform_id=None, params=None, headers=None
+    ):
+        """
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.x/get-data-frame-transform.html>`_
+
+        :arg transform_id: The id or comma delimited list of id expressions of
+            the transforms to get, '_all' or '*' implies get all transforms
+        :arg from_: skips a number of transform configs, defaults to 0
+        :arg size: specifies a max number of transforms to get, defaults to 100
+        """
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_data_frame", "transforms", transform_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    async def get_data_frame_transform_stats(
+        self, transform_id=None, params=None, headers=None
+    ):
+        """
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.x/get-data-frame-transform-stats.html>`_
+
+        :arg transform_id: The id of the transform for which to get stats.
+            '_all' or '*' implies all transforms
+        """
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_data_frame", "transforms", transform_id, "_stats"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    async def preview_data_frame_transform(self, body, params=None, headers=None):
+        """
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.x/preview-data-frame-transform.html>`_
+
+        :arg body: The definition for the data_frame transform to preview
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+        return await self.transport.perform_request(
+            "POST",
+            "/_data_frame/transforms/_preview",
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    async def put_data_frame_transform(
+        self, transform_id, body, params=None, headers=None
+    ):
+        """
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.x/put-data-frame-transform.html>`_
+
+        :arg transform_id: The id of the new transform.
+        :arg body: The data frame transform definition
+        """
+        for param in (transform_id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_data_frame", "transforms", transform_id),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("timeout")
+    async def start_data_frame_transform(self, transform_id, params=None, headers=None):
+        """
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.x/start-data-frame-transform.html>`_
+
+        :arg transform_id: The id of the transform to start
+        :arg timeout: Controls the time to wait for the transform to start
+        """
+        if transform_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for a required argument 'transform_id'."
+            )
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_data_frame", "transforms", transform_id, "_start"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("timeout", "wait_for_completion")
+    async def stop_data_frame_transform(self, transform_id, params=None, headers=None):
+        """
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.x/stop-data-frame-transform.html>`_
+
+        :arg transform_id: The id of the transform to stop
+        :arg timeout: Controls the time to wait until the transform has stopped.
+            Defaults to 30 seconds
+        :arg wait_for_completion: Whether to wait for the transform to fully
+            stop before returning or not. Defaults to false
+        """
+        if transform_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for a required argument 'transform_id'."
+            )
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_data_frame", "transforms", transform_id, "_stop"),
+            params=params,
+            headers=headers,
+        )
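A usage sketch for the data frame transform namespace above, assuming it is exposed as `es.data_frame`; the transform id, indices, and pivot definition are illustrative only:

```python
from elasticsearch_7 import AsyncElasticsearch


async def run_transform(es: AsyncElasticsearch):
    # Illustrative pivot transform; index and field names are placeholders.
    transform = {
        "source": {"index": "orders"},
        "dest": {"index": "orders_by_customer"},
        "pivot": {
            "group_by": {"customer_id": {"terms": {"field": "customer_id"}}},
            "aggregations": {"total_price": {"sum": {"field": "price"}}},
        },
    }
    await es.data_frame.put_data_frame_transform("orders-rollup", body=transform)
    await es.data_frame.start_data_frame_transform("orders-rollup", timeout="30s")
    return await es.data_frame.get_data_frame_transform_stats("orders-rollup")
```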
diff --git a/elasticsearch_7/_async/client/deprecation.py b/elasticsearch_7/_async/client/deprecation.py
new file mode 100644
index 0000000000000000000000000000000000000000..deeb978dbfa719d013f9606a0e6eebd797382668
--- /dev/null
+++ b/elasticsearch_7/_async/client/deprecation.py
@@ -0,0 +1,21 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path
+
+
+class DeprecationClient(NamespacedClient):
+    @query_params()
+    async def info(self, index=None, params=None, headers=None):
+        """
+        `<http://www.elastic.co/guide/en/migration/7.x/migration-api-deprecation.html>`_
+
+        :arg index: Index pattern
+        """
+        return await self.transport.perform_request(
+            "GET",
+            _make_path(index, "_xpack", "migration", "deprecations"),
+            params=params,
+            headers=headers,
+        )
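A one-call sketch for the deprecation info API above, assuming the namespace is exposed as `es.deprecation` (mirroring the synchronous client); the index pattern is a placeholder:

```python
from elasticsearch_7 import AsyncElasticsearch


async def deprecation_report(es: AsyncElasticsearch):
    # List deprecated settings and features affecting indices matching the pattern.
    return await es.deprecation.info(index="logs-*")
```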
diff --git a/elasticsearch_7/_async/client/enrich.py b/elasticsearch_7/_async/client/enrich.py
new file mode 100644
index 0000000000000000000000000000000000000000..aa7129338adc6fb15095b8a588b7e1ba031813ab
--- /dev/null
+++ b/elasticsearch_7/_async/client/enrich.py
@@ -0,0 +1,89 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class EnrichClient(NamespacedClient):
+    @query_params()
+    async def delete_policy(self, name, params=None, headers=None):
+        """
+        Deletes an existing enrich policy and its enrich index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/delete-enrich-policy-api.html>`_
+
+        :arg name: The name of the enrich policy
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return await self.transport.perform_request(
+            "DELETE",
+            _make_path("_enrich", "policy", name),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("wait_for_completion")
+    async def execute_policy(self, name, params=None, headers=None):
+        """
+        Creates the enrich index for an existing enrich policy.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/execute-enrich-policy-api.html>`_
+
+        :arg name: The name of the enrich policy
+        :arg wait_for_completion: Whether the request should block until
+            the execution is complete.  Default: True
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_enrich", "policy", name, "_execute"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    async def get_policy(self, name=None, params=None, headers=None):
+        """
+        Gets information about an enrich policy.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/get-enrich-policy-api.html>`_
+
+        :arg name: A comma-separated list of enrich policy names
+        """
+        return await self.transport.perform_request(
+            "GET", _make_path("_enrich", "policy", name), params=params, headers=headers
+        )
+
+    @query_params()
+    async def put_policy(self, name, body, params=None, headers=None):
+        """
+        Creates a new enrich policy.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/put-enrich-policy-api.html>`_
+
+        :arg name: The name of the enrich policy
+        :arg body: The enrich policy to register
+        """
+        for param in (name, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_enrich", "policy", name),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    async def stats(self, params=None, headers=None):
+        """
+        Gets enrich coordinator statistics and information about enrich policies that
+        are currently executing.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/enrich-stats-api.html>`_
+        """
+        return await self.transport.perform_request(
+            "GET", "/_enrich/_stats", params=params, headers=headers
+        )
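A sketch of the typical enrich workflow using the namespace above (register a policy, then build its enrich index), assuming it is exposed as `es.enrich`; the policy name, indices, and fields are placeholders:

```python
from elasticsearch_7 import AsyncElasticsearch


async def setup_enrich(es: AsyncElasticsearch):
    # Illustrative match policy; index and field names are placeholders.
    policy = {
        "match": {
            "indices": "users",
            "match_field": "email",
            "enrich_fields": ["first_name", "last_name"],
        }
    }
    await es.enrich.put_policy("users-policy", body=policy)
    # Builds the enrich index; blocks until finished since
    # wait_for_completion defaults to True.
    await es.enrich.execute_policy("users-policy")
    return await es.enrich.stats()
```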
diff --git a/elasticsearch_7/_async/client/eql.py b/elasticsearch_7/_async/client/eql.py
new file mode 100644
index 0000000000000000000000000000000000000000..d11b5df9d21005a8da3bc278c24fa54af6203e7d
--- /dev/null
+++ b/elasticsearch_7/_async/client/eql.py
@@ -0,0 +1,29 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, SKIP_IN_PATH, query_params, _make_path
+
+
+class EqlClient(NamespacedClient):
+    @query_params()
+    async def search(self, index, body, params=None, headers=None):
+        """
+        Returns results matching a query expressed in Event Query Language (EQL)
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.html>`_
+
+        :arg index: The name of the index to scope the operation
+        :arg body: EQL request body. Use the `query` field to limit the query
+            scope.
+        """
+        for param in (index, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path(index, "_eql", "search"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
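A minimal call sketch for the EQL search API above, assuming the namespace is exposed as `es.eql`; the index pattern and query are placeholders:

```python
from elasticsearch_7 import AsyncElasticsearch


async def find_suspicious_processes(es: AsyncElasticsearch):
    # Illustrative EQL query; adjust the index pattern and fields to your data.
    return await es.eql.search(
        index="logs-endpoint*",
        body={"query": 'process where process.name == "cmd.exe"'},
    )
```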
diff --git a/elasticsearch_7/_async/client/graph.py b/elasticsearch_7/_async/client/graph.py
new file mode 100644
index 0000000000000000000000000000000000000000..a6a894239da5504e0612ddb5d578f76fa7bd942e
--- /dev/null
+++ b/elasticsearch_7/_async/client/graph.py
@@ -0,0 +1,33 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class GraphClient(NamespacedClient):
+    @query_params("routing", "timeout")
+    async def explore(self, index, body=None, doc_type=None, params=None, headers=None):
+        """
+        Explore extracted and summarized information about the documents and terms in
+        an index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/graph-explore-api.html>`_
+
+        :arg index: A comma-separated list of index names to search; use
+            `_all` or empty string to perform the operation on all indices
+        :arg body: Graph Query DSL
+        :arg doc_type: A comma-separated list of document types to
+            search; leave empty to perform the operation on all types
+        :arg routing: Specific routing value
+        :arg timeout: Explicit operation timeout
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path(index, doc_type, "_graph", "explore"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
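A sketch of calling the Graph explore API above, assuming the namespace is exposed as `es.graph`; the index, seed query, and vertex fields are placeholders:

```python
from elasticsearch_7 import AsyncElasticsearch


async def explore_tags(es: AsyncElasticsearch):
    # Illustrative explore request: seed with a query, then walk the `tags` field.
    body = {
        "query": {"match": {"title": "elasticsearch"}},
        "vertices": [{"field": "tags"}],
        "connections": {"vertices": [{"field": "tags"}]},
    }
    return await es.graph.explore(index="posts", body=body, timeout="10s")
```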
diff --git a/elasticsearch_7/_async/client/ilm.py b/elasticsearch_7/_async/client/ilm.py
new file mode 100644
index 0000000000000000000000000000000000000000..b776384f1a9e4eb900fb125a3325f1c88a7d7312
--- /dev/null
+++ b/elasticsearch_7/_async/client/ilm.py
@@ -0,0 +1,162 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class IlmClient(NamespacedClient):
+    @query_params()
+    async def delete_lifecycle(self, policy, params=None, headers=None):
+        """
+        Deletes the specified lifecycle policy definition. A currently used policy
+        cannot be deleted.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ilm-delete-lifecycle.html>`_
+
+        :arg policy: The name of the index lifecycle policy
+        """
+        if policy in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'policy'.")
+
+        return await self.transport.perform_request(
+            "DELETE",
+            _make_path("_ilm", "policy", policy),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("only_errors", "only_managed")
+    async def explain_lifecycle(self, index, params=None, headers=None):
+        """
+        Retrieves information about the index's current lifecycle state, such as the
+        currently executing phase, action, and step.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ilm-explain-lifecycle.html>`_
+
+        :arg index: The name of the index to explain
+        :arg only_errors: filters the indices included in the response
+            to ones in an ILM error state, implies only_managed
+        :arg only_managed: filters the indices included in the response
+            to ones managed by ILM
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return await self.transport.perform_request(
+            "GET", _make_path(index, "_ilm", "explain"), params=params, headers=headers
+        )
+
+    @query_params()
+    async def get_lifecycle(self, policy=None, params=None, headers=None):
+        """
+        Returns the specified policy definition. Includes the policy version and last
+        modified date.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ilm-get-lifecycle.html>`_
+
+        :arg policy: The name of the index lifecycle policy
+        """
+        return await self.transport.perform_request(
+            "GET", _make_path("_ilm", "policy", policy), params=params, headers=headers
+        )
+
+    @query_params()
+    async def get_status(self, params=None, headers=None):
+        """
+        Retrieves the current index lifecycle management (ILM) status.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ilm-get-status.html>`_
+        """
+        return await self.transport.perform_request(
+            "GET", "/_ilm/status", params=params, headers=headers
+        )
+
+    @query_params()
+    async def move_to_step(self, index, body=None, params=None, headers=None):
+        """
+        Manually moves an index into the specified step and executes that step.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ilm-move-to-step.html>`_
+
+        :arg index: The name of the index whose lifecycle step is to
+            change
+        :arg body: The new lifecycle step to move to
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_ilm", "move", index),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    async def put_lifecycle(self, policy, body=None, params=None, headers=None):
+        """
+        Creates a lifecycle policy
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ilm-put-lifecycle.html>`_
+
+        :arg policy: The name of the index lifecycle policy
+        :arg body: The lifecycle policy definition to register
+        """
+        if policy in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'policy'.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_ilm", "policy", policy),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    async def remove_policy(self, index, params=None, headers=None):
+        """
+        Removes the assigned lifecycle policy and stops managing the specified index
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ilm-remove-policy.html>`_
+
+        :arg index: The name of the index to remove policy on
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return await self.transport.perform_request(
+            "POST", _make_path(index, "_ilm", "remove"), params=params, headers=headers
+        )
+
+    @query_params()
+    async def retry(self, index, params=None, headers=None):
+        """
+        Retries executing the policy for an index that is in the ERROR step.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ilm-retry-policy.html>`_
+
+        :arg index: The name of the indices (comma-separated) whose
+            failed lifecycle step is to be retried
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return await self.transport.perform_request(
+            "POST", _make_path(index, "_ilm", "retry"), params=params, headers=headers
+        )
+
+    @query_params()
+    async def start(self, params=None, headers=None):
+        """
+        Start the index lifecycle management (ILM) plugin.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ilm-start.html>`_
+        """
+        return await self.transport.perform_request(
+            "POST", "/_ilm/start", params=params, headers=headers
+        )
+
+    @query_params()
+    async def stop(self, params=None, headers=None):
+        """
+        Halts all lifecycle management operations and stops the index lifecycle
+        management (ILM) plugin
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ilm-stop.html>`_
+        """
+        return await self.transport.perform_request(
+            "POST", "/_ilm/stop", params=params, headers=headers
+        )
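A sketch of registering and inspecting a lifecycle policy with the ILM namespace above, assuming it is exposed as `es.ilm`; the policy and index names, sizes, and ages are placeholders:

```python
from elasticsearch_7 import AsyncElasticsearch


async def manage_lifecycle(es: AsyncElasticsearch):
    # Illustrative policy: roll over hot indices at 50 GB or 30 days,
    # delete them after 90 days.
    policy = {
        "policy": {
            "phases": {
                "hot": {
                    "actions": {"rollover": {"max_size": "50gb", "max_age": "30d"}}
                },
                "delete": {"min_age": "90d", "actions": {"delete": {}}},
            }
        }
    }
    await es.ilm.put_lifecycle("logs-policy", body=policy)
    # Check which phase, action, and step a managed index is currently in.
    return await es.ilm.explain_lifecycle("logs-000001")
```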
diff --git a/elasticsearch_7/_async/client/indices.py b/elasticsearch_7/_async/client/indices.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0c74e2f2308053a8317d8460010ccbd011fe9ae
--- /dev/null
+++ b/elasticsearch_7/_async/client/indices.py
@@ -0,0 +1,1439 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class IndicesClient(NamespacedClient):
+    @query_params()
+    async def analyze(self, body=None, index=None, params=None, headers=None):
+        """
+        Performs the analysis process on a text and returns the tokens breakdown of the
+        text.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-analyze.html>`_
+
+        :arg body: Define analyzer/tokenizer parameters and the text on
+            which the analysis should be performed
+        :arg index: The name of the index to scope the operation
+        """
+        return await self.transport.perform_request(
+            "POST",
+            _make_path(index, "_analyze"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable")
+    async def refresh(self, index=None, params=None, headers=None):
+        """
+        Performs the refresh operation on one or more indices.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-refresh.html>`_
+
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string to perform the operation on all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        """
+        return await self.transport.perform_request(
+            "POST", _make_path(index, "_refresh"), params=params, headers=headers
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "force",
+        "ignore_unavailable",
+        "wait_if_ongoing",
+    )
+    async def flush(self, index=None, params=None, headers=None):
+        """
+        Performs the flush operation on one or more indices.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-flush.html>`_
+
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string for all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg force: Whether a flush should be forced even if it is not
+            necessarily needed, i.e. if no changes will be committed to the index.
+            This is useful if transaction log IDs should be incremented even if no
+            uncommitted changes are present. (This setting can be considered as
+            internal)
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg wait_if_ongoing: If set to true the flush operation will
+            block until the flush can be executed if another flush operation is
+            already executing. The default is true. If set to false the flush will
+            be skipped if another flush operation is already running.
+        """
+        return await self.transport.perform_request(
+            "POST", _make_path(index, "_flush"), params=params, headers=headers
+        )
+
+    @query_params(
+        "include_type_name", "master_timeout", "timeout", "wait_for_active_shards"
+    )
+    async def create(self, index, body=None, params=None, headers=None):
+        """
+        Creates an index with optional settings and mappings.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-create-index.html>`_
+
+        :arg index: The name of the index
+        :arg body: The configuration for the index (`settings` and
+            `mappings`)
+        :arg include_type_name: Whether a type should be expected in the
+            body of the mappings.
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_active_shards: Set the number of active shards to
+            wait for before the operation returns.
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return await self.transport.perform_request(
+            "PUT", _make_path(index), params=params, headers=headers, body=body
+        )
+
+    @query_params("master_timeout", "timeout", "wait_for_active_shards")
+    async def clone(self, index, target, body=None, params=None, headers=None):
+        """
+        Clones an index
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-clone-index.html>`_
+
+        :arg index: The name of the source index to clone
+        :arg target: The name of the target index to clone into
+        :arg body: The configuration for the target index (`settings`
+            and `aliases`)
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_active_shards: Set the number of active shards to
+            wait for on the cloned index before the operation returns.
+        """
+        for param in (index, target):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path(index, "_clone", target),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "flat_settings",
+        "ignore_unavailable",
+        "include_defaults",
+        "include_type_name",
+        "local",
+        "master_timeout",
+    )
+    async def get(self, index, params=None, headers=None):
+        """
+        Returns information about one or more indices.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-get-index.html>`_
+
+        :arg index: A comma-separated list of index names
+        :arg allow_no_indices: Ignore if a wildcard expression resolves
+            to no concrete indices (default: false)
+        :arg expand_wildcards: Whether wildcard expressions should get
+            expanded to open or closed indices (default: open)  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg flat_settings: Return settings in flat format (default:
+            false)
+        :arg ignore_unavailable: Ignore unavailable indexes (default:
+            false)
+        :arg include_defaults: Whether to return all default settings for
+            each of the indices.
+        :arg include_type_name: Whether to add the type name to the
+            response (default: false)
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Specify timeout for connection to master
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return await self.transport.perform_request(
+            "GET", _make_path(index), params=params, headers=headers
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "ignore_unavailable",
+        "master_timeout",
+        "timeout",
+        "wait_for_active_shards",
+    )
+    async def open(self, index, params=None, headers=None):
+        """
+        Opens an index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-open-close.html>`_
+
+        :arg index: A comma separated list of indices to open
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: closed
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_active_shards: Sets the number of active shards to
+            wait for before the operation returns.
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return await self.transport.perform_request(
+            "POST", _make_path(index, "_open"), params=params, headers=headers
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "ignore_unavailable",
+        "master_timeout",
+        "timeout",
+        "wait_for_active_shards",
+    )
+    async def close(self, index, params=None, headers=None):
+        """
+        Closes an index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-open-close.html>`_
+
+        :arg index: A comma separated list of indices to close
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_active_shards: Sets the number of active shards to
+            wait for before the operation returns.
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return await self.transport.perform_request(
+            "POST", _make_path(index, "_close"), params=params, headers=headers
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "ignore_unavailable",
+        "master_timeout",
+        "timeout",
+    )
+    async def delete(self, index, params=None, headers=None):
+        """
+        Deletes an index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-delete-index.html>`_
+
+        :arg index: A comma-separated list of indices to delete; use
+            `_all` or `*` string to delete all indices
+        :arg allow_no_indices: Ignore if a wildcard expression resolves
+            to no concrete indices (default: false)
+        :arg expand_wildcards: Whether wildcard expressions should get
+            expanded to open or closed indices (default: open)  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg ignore_unavailable: Ignore unavailable indexes (default:
+            false)
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return await self.transport.perform_request(
+            "DELETE", _make_path(index), params=params, headers=headers
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "flat_settings",
+        "ignore_unavailable",
+        "include_defaults",
+        "local",
+    )
+    async def exists(self, index, params=None, headers=None):
+        """
+        Returns information about whether a particular index exists.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-exists.html>`_
+
+        :arg index: A comma-separated list of index names
+        :arg allow_no_indices: Ignore if a wildcard expression resolves
+            to no concrete indices (default: false)
+        :arg expand_wildcards: Whether wildcard expressions should get
+            expanded to open or closed indices (default: open)  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg flat_settings: Return settings in flat format (default:
+            false)
+        :arg ignore_unavailable: Ignore unavailable indexes (default:
+            false)
+        :arg include_defaults: Whether to return all default settings for
+            each of the indices.
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return await self.transport.perform_request(
+            "HEAD", _make_path(index), params=params, headers=headers
+        )
+
+    @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable", "local")
+    async def exists_type(self, index, doc_type, params=None, headers=None):
+        """
+        Returns information about whether a particular document type exists.
+        (DEPRECATED)
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-types-exists.html>`_
+
+        :arg index: A comma-separated list of index names; use `_all` to
+            check the types across all indices
+        :arg doc_type: A comma-separated list of document types to check
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        """
+        for param in (index, doc_type):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "HEAD",
+            _make_path(index, "_mapping", doc_type),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "ignore_unavailable",
+        "include_type_name",
+        "master_timeout",
+        "timeout",
+    )
+    async def put_mapping(
+        self, body, index=None, doc_type=None, params=None, headers=None
+    ):
+        """
+        Updates the index mappings.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-put-mapping.html>`_
+
+        :arg body: The mapping definition
+        :arg index: A comma-separated list of index names the mapping
+            should be added to (supports wildcards); use `_all` or omit to add the
+            mapping on all indices.
+        :arg doc_type: The name of the document type
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg include_type_name: Whether a type should be expected in the
+            body of the mappings.
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        if doc_type not in SKIP_IN_PATH and index in SKIP_IN_PATH:
+            index = "_all"
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path(index, doc_type, "_mapping"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "ignore_unavailable",
+        "include_type_name",
+        "local",
+        "master_timeout",
+    )
+    async def get_mapping(self, index=None, doc_type=None, params=None, headers=None):
+        """
+        Returns mappings for one or more indices.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-get-mapping.html>`_
+
+        :arg index: A comma-separated list of index names
+        :arg doc_type: A comma-separated list of document types
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg include_type_name: Whether to add the type name to the
+            response (default: false)
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Specify timeout for connection to master
+        """
+        return await self.transport.perform_request(
+            "GET",
+            _make_path(index, "_mapping", doc_type),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("master_timeout", "timeout")
+    async def put_alias(self, index, name, body=None, params=None, headers=None):
+        """
+        Creates or updates an alias.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-aliases.html>`_
+
+        :arg index: A comma-separated list of index names the alias
+            should point to (supports wildcards); use `_all` to perform the
+            operation on all indices.
+        :arg name: The name of the alias to be created or updated
+        :arg body: The settings for the alias, such as `routing` or
+            `filter`
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        """
+        for param in (index, name):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path(index, "_alias", name),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable", "local")
+    async def exists_alias(self, name, index=None, params=None, headers=None):
+        """
+        Returns information about whether a particular alias exists.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-aliases.html>`_
+
+        :arg name: A comma-separated list of alias names to return
+        :arg index: A comma-separated list of index names to filter
+            aliases
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: all
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return await self.transport.perform_request(
+            "HEAD", _make_path(index, "_alias", name), params=params, headers=headers
+        )
+
+    @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable", "local")
+    async def get_alias(self, index=None, name=None, params=None, headers=None):
+        """
+        Returns an alias.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-aliases.html>`_
+
+        :arg index: A comma-separated list of index names to filter
+            aliases
+        :arg name: A comma-separated list of alias names to return
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: all
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        """
+        return await self.transport.perform_request(
+            "GET", _make_path(index, "_alias", name), params=params, headers=headers
+        )
+
+    @query_params("master_timeout", "timeout")
+    async def update_aliases(self, body, params=None, headers=None):
+        """
+        Updates index aliases.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-aliases.html>`_
+
+        :arg body: The definition of `actions` to perform
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Request timeout
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return await self.transport.perform_request(
+            "POST", "/_aliases", params=params, headers=headers, body=body
+        )
+
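As a usage sketch for the index and alias APIs above (`create`, `put_alias`, `update_aliases`), assuming the namespace is exposed as `es.indices`; the index name, mapping, and alias are placeholders:

```python
from elasticsearch_7 import AsyncElasticsearch


async def set_up_index(es: AsyncElasticsearch):
    # Illustrative settings/mappings; typeless mapping as in the 7.x defaults.
    await es.indices.create(
        "logs-000001",
        body={
            "settings": {"number_of_shards": 1},
            "mappings": {"properties": {"message": {"type": "text"}}},
        },
    )
    # Atomically point a write alias at the new index.
    await es.indices.update_aliases(
        body={"actions": [{"add": {"index": "logs-000001", "alias": "logs-write"}}]}
    )
```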
+    @query_params("master_timeout", "timeout")
+    async def delete_alias(self, index, name, params=None, headers=None):
+        """
+        Deletes an alias.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-aliases.html>`_
+
+        :arg index: A comma-separated list of index names (supports
+            wildcards); use `_all` for all indices
+        :arg name: A comma-separated list of aliases to delete (supports
+            wildcards); use `_all` to delete all aliases for the specified indices.
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        """
+        for param in (index, name):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "DELETE", _make_path(index, "_alias", name), params=params, headers=headers
+        )
+
+    @query_params("create", "include_type_name", "master_timeout", "order")
+    async def put_template(self, name, body, params=None, headers=None):
+        """
+        Creates or updates an index template.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-templates.html>`_
+
+        :arg name: The name of the template
+        :arg body: The template definition
+        :arg create: Whether the index template should only be added if
+            new or can also replace an existing one
+        :arg include_type_name: Whether a type should be returned in the
+            body of the mappings.
+        :arg master_timeout: Specify timeout for connection to master
+        :arg order: The order for this template when merging multiple
+            matching ones (higher numbers are merged later, overriding the lower
+            numbers)
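+
+        A minimal usage sketch (assumes an ``AsyncElasticsearch`` instance
+        named ``es``; the template name and index pattern are hypothetical)::
+
+            await es.indices.put_template(
+                name="logs-template",
+                body={
+                    "index_patterns": ["logs-*"],
+                    "settings": {"number_of_shards": 1},
+                },
+            )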
+        """
+        for param in (name, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_template", name),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("flat_settings", "local", "master_timeout")
+    async def exists_template(self, name, params=None, headers=None):
+        """
+        Returns information about whether a particular index template exists.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-templates.html>`_
+
+        :arg name: The comma-separated names of the index templates
+        :arg flat_settings: Return settings in flat format (default:
+            false)
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return await self.transport.perform_request(
+            "HEAD", _make_path("_template", name), params=params, headers=headers
+        )
+
+    @query_params("flat_settings", "include_type_name", "local", "master_timeout")
+    async def get_template(self, name=None, params=None, headers=None):
+        """
+        Returns an index template.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-templates.html>`_
+
+        :arg name: The comma-separated names of the index templates
+        :arg flat_settings: Return settings in flat format (default:
+            false)
+        :arg include_type_name: Whether a type should be returned in the
+            body of the mappings.
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        """
+        return await self.transport.perform_request(
+            "GET", _make_path("_template", name), params=params, headers=headers
+        )
+
+    @query_params("master_timeout", "timeout")
+    async def delete_template(self, name, params=None, headers=None):
+        """
+        Deletes an index template.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-templates.html>`_
+
+        :arg name: The name of the template
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return await self.transport.perform_request(
+            "DELETE", _make_path("_template", name), params=params, headers=headers
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "flat_settings",
+        "ignore_unavailable",
+        "include_defaults",
+        "local",
+        "master_timeout",
+    )
+    async def get_settings(self, index=None, name=None, params=None, headers=None):
+        """
+        Returns settings for one or more indices.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-get-settings.html>`_
+
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string to perform the operation on all indices
+        :arg name: The name of the settings that should be included
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: all
+        :arg flat_settings: Return settings in flat format (default:
+            false)
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg include_defaults: Whether to return all default settings for
+            each of the indices.
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Specify timeout for connection to master
+        """
+        return await self.transport.perform_request(
+            "GET", _make_path(index, "_settings", name), params=params, headers=headers
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "flat_settings",
+        "ignore_unavailable",
+        "master_timeout",
+        "preserve_existing",
+        "timeout",
+    )
+    async def put_settings(self, body, index=None, params=None, headers=None):
+        """
+        Updates the index settings.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-update-settings.html>`_
+
+        :arg body: The index settings to be updated
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string to perform the operation on all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg flat_settings: Return settings in flat format (default:
+            false)
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg master_timeout: Specify timeout for connection to master
+        :arg preserve_existing: Whether to update existing settings. If
+            set to `true` existing settings on an index remain unchanged, the
+            default is `false`
+        :arg timeout: Explicit operation timeout
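+
+        A minimal usage sketch (assumes an ``AsyncElasticsearch`` instance
+        named ``es``; the index name is hypothetical)::
+
+            await es.indices.put_settings(
+                index="logs-2020", body={"index": {"number_of_replicas": 2}}
+            )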
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path(index, "_settings"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params(
+        "completion_fields",
+        "expand_wildcards",
+        "fielddata_fields",
+        "fields",
+        "forbid_closed_indices",
+        "groups",
+        "include_segment_file_sizes",
+        "include_unloaded_segments",
+        "level",
+        "types",
+    )
+    async def stats(self, index=None, metric=None, params=None, headers=None):
+        """
+        Provides statistics on operations happening in an index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-stats.html>`_
+
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string to perform the operation on all indices
+        :arg metric: Limit the information returned to the specific
+            metrics.  Valid choices: _all, completion, docs, fielddata, query_cache,
+            flush, get, indexing, merge, request_cache, refresh, search, segments,
+            store, warmer, suggest
+        :arg completion_fields: A comma-separated list of fields for
+            `fielddata` and `suggest` index metric (supports wildcards)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg fielddata_fields: A comma-separated list of fields for
+            `fielddata` index metric (supports wildcards)
+        :arg fields: A comma-separated list of fields for `fielddata`
+            and `completion` index metric (supports wildcards)
+        :arg forbid_closed_indices: If set to false, stats will also be
+            collected from closed indices if explicitly specified or if
+            expand_wildcards expands to closed indices  Default: True
+        :arg groups: A comma-separated list of search groups for
+            `search` index metric
+        :arg include_segment_file_sizes: Whether to report the
+            aggregated disk usage of each one of the Lucene index files (only
+            applies if segment stats are requested)
+        :arg include_unloaded_segments: If set to true segment stats
+            will include stats for segments that are not currently loaded into
+            memory
+        :arg level: Return stats aggregated at cluster, index or shard
+            level  Valid choices: cluster, indices, shards  Default: indices
+        :arg types: A comma-separated list of document types for the
+            `indexing` index metric
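+
+        A minimal usage sketch (assumes an ``AsyncElasticsearch`` instance
+        named ``es``; the index name is hypothetical)::
+
+            resp = await es.indices.stats(index="logs-2020", metric="docs,store")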
+        """
+        return await self.transport.perform_request(
+            "GET", _make_path(index, "_stats", metric), params=params, headers=headers
+        )
+
+    @query_params(
+        "allow_no_indices", "expand_wildcards", "ignore_unavailable", "verbose"
+    )
+    async def segments(self, index=None, params=None, headers=None):
+        """
+        Provides low-level information about segments in a Lucene index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-segments.html>`_
+
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string to perform the operation on all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg verbose: Includes detailed memory usage by Lucene.
+        """
+        return await self.transport.perform_request(
+            "GET", _make_path(index, "_segments"), params=params, headers=headers
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "fielddata",
+        "fields",
+        "ignore_unavailable",
+        "query",
+        "request",
+    )
+    async def clear_cache(self, index=None, params=None, headers=None):
+        """
+        Clears all or specific caches for one or more indices.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-clearcache.html>`_
+
+        :arg index: A comma-separated list of index names to limit the
+            operation
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg fielddata: Clear field data
+        :arg fields: A comma-separated list of fields to clear when
+            using the `fielddata` parameter (default: all)
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg query: Clear query caches
+        :arg request: Clear request cache
+        """
+        return await self.transport.perform_request(
+            "POST", _make_path(index, "_cache", "clear"), params=params, headers=headers
+        )
+
+    @query_params("active_only", "detailed")
+    async def recovery(self, index=None, params=None, headers=None):
+        """
+        Returns information about ongoing index shard recoveries.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-recovery.html>`_
+
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string to perform the operation on all indices
+        :arg active_only: Display only those recoveries that are
+            currently on-going
+        :arg detailed: Whether to display detailed information about
+            shard recovery
+        """
+        return await self.transport.perform_request(
+            "GET", _make_path(index, "_recovery"), params=params, headers=headers
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "ignore_unavailable",
+        "only_ancient_segments",
+        "wait_for_completion",
+    )
+    async def upgrade(self, index=None, params=None, headers=None):
+        """
+        The _upgrade API is no longer useful and will be removed.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-upgrade.html>`_
+
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string to perform the operation on all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg only_ancient_segments: If true, only ancient (an older
+            Lucene major release) segments will be upgraded
+        :arg wait_for_completion: Specify whether the request should
+            block until the all segments are upgraded (default: false)
+        """
+        return await self.transport.perform_request(
+            "POST", _make_path(index, "_upgrade"), params=params, headers=headers
+        )
+
+    @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable")
+    async def get_upgrade(self, index=None, params=None, headers=None):
+        """
+        The _upgrade API is no longer useful and will be removed.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-upgrade.html>`_
+
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string to perform the operation on all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        """
+        return await self.transport.perform_request(
+            "GET", _make_path(index, "_upgrade"), params=params, headers=headers
+        )
+
+    @query_params(
+        "allow_no_indices", "expand_wildcards", "ignore_unavailable", "status"
+    )
+    async def shard_stores(self, index=None, params=None, headers=None):
+        """
+        Provides store information for shard copies of indices.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-shards-stores.html>`_
+
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string to perform the operation on all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg status: A comma-separated list of statuses used to filter
+            on shards to get store information for  Valid choices: green, yellow,
+            red, all
+        """
+        return await self.transport.perform_request(
+            "GET", _make_path(index, "_shard_stores"), params=params, headers=headers
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "flush",
+        "ignore_unavailable",
+        "max_num_segments",
+        "only_expunge_deletes",
+    )
+    async def forcemerge(self, index=None, params=None, headers=None):
+        """
+        Performs the force merge operation on one or more indices.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-forcemerge.html>`_
+
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string to perform the operation on all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg flush: Specify whether the index should be flushed after
+            performing the operation (default: true)
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg max_num_segments: The number of segments the index should
+            be merged into (default: dynamic)
+        :arg only_expunge_deletes: Specify whether the operation should
+            only expunge deleted documents
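+
+        A minimal usage sketch (assumes an ``AsyncElasticsearch`` instance
+        named ``es``; the index name is hypothetical)::
+
+            await es.indices.forcemerge(index="logs-2020", max_num_segments=1)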
+        """
+        return await self.transport.perform_request(
+            "POST", _make_path(index, "_forcemerge"), params=params, headers=headers
+        )
+
+    @query_params(
+        "copy_settings", "master_timeout", "timeout", "wait_for_active_shards"
+    )
+    async def shrink(self, index, target, body=None, params=None, headers=None):
+        """
+        Allows you to shrink an existing index into a new index with fewer primary
+        shards.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-shrink-index.html>`_
+
+        :arg index: The name of the source index to shrink
+        :arg target: The name of the target index to shrink into
+        :arg body: The configuration for the target index (`settings`
+            and `aliases`)
+        :arg copy_settings: whether or not to copy settings from the
+            source index (defaults to false)
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_active_shards: Set the number of active shards to
+            wait for on the shrunken index before the operation returns.
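+
+        A minimal usage sketch (assumes an ``AsyncElasticsearch`` instance
+        named ``es`` and a source index that is already read-only with all of
+        its shards on a single node; the index names are hypothetical)::
+
+            await es.indices.shrink(
+                index="logs-2020",
+                target="logs-2020-shrunk",
+                body={"settings": {"index.number_of_shards": 1}},
+            )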
+        """
+        for param in (index, target):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path(index, "_shrink", target),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params(
+        "copy_settings", "master_timeout", "timeout", "wait_for_active_shards"
+    )
+    async def split(self, index, target, body=None, params=None, headers=None):
+        """
+        Allows you to split an existing index into a new index with more primary
+        shards.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-split-index.html>`_
+
+        :arg index: The name of the source index to split
+        :arg target: The name of the target index to split into
+        :arg body: The configuration for the target index (`settings`
+            and `aliases`)
+        :arg copy_settings: whether or not to copy settings from the
+            source index (defaults to false)
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_active_shards: Set the number of active shards to
+            wait for on the newly split index before the operation returns.
+        """
+        for param in (index, target):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path(index, "_split", target),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params(
+        "dry_run",
+        "include_type_name",
+        "master_timeout",
+        "timeout",
+        "wait_for_active_shards",
+    )
+    async def rollover(
+        self, alias, body=None, new_index=None, params=None, headers=None
+    ):
+        """
+        Updates an alias to point to a new index when the existing index is considered
+        to be too large or too old.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-rollover-index.html>`_
+
+        :arg alias: The name of the alias to rollover
+        :arg body: The conditions that needs to be met for executing
+            rollover
+        :arg new_index: The name of the rollover index
+        :arg dry_run: If set to true the rollover action will only be
+            validated but not actually performed even if a condition matches. The
+            default is false
+        :arg include_type_name: Whether a type should be included in the
+            body of the mappings.
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_active_shards: Set the number of active shards to
+            wait for on the newly created rollover index before the operation
+            returns.
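+
+        A minimal usage sketch (assumes an ``AsyncElasticsearch`` instance
+        named ``es`` and an existing write alias; the alias name and
+        conditions are hypothetical)::
+
+            await es.indices.rollover(
+                alias="logs-write",
+                body={"conditions": {"max_age": "7d", "max_docs": 10000}},
+            )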
+        """
+        if alias in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'alias'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path(alias, "_rollover", new_index),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "ignore_unavailable",
+        "master_timeout",
+        "timeout",
+        "wait_for_active_shards",
+    )
+    async def freeze(self, index, params=None, headers=None):
+        """
+        Freezes an index. A frozen index has almost no overhead on the cluster (except
+        for maintaining its metadata in memory) and is read-only.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/freeze-index-api.html>`_
+
+        :arg index: The name of the index to freeze
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: closed
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_active_shards: Sets the number of active shards to
+            wait for before the operation returns.
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return await self.transport.perform_request(
+            "POST", _make_path(index, "_freeze"), params=params, headers=headers
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "ignore_unavailable",
+        "master_timeout",
+        "timeout",
+        "wait_for_active_shards",
+    )
+    async def unfreeze(self, index, params=None, headers=None):
+        """
+        Unfreezes an index. When a frozen index is unfrozen, the index goes through the
+        normal recovery process and becomes writeable again.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/unfreeze-index-api.html>`_
+
+        :arg index: The name of the index to unfreeze
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: closed
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_active_shards: Sets the number of active shards to
+            wait for before the operation returns.
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return await self.transport.perform_request(
+            "POST", _make_path(index, "_unfreeze"), params=params, headers=headers
+        )
+
+    @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable")
+    async def reload_search_analyzers(self, index, params=None, headers=None):
+        """
+        Reloads an index's search analyzers and their resources.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-reload-analyzers.html>`_
+
+        :arg index: A comma-separated list of index names to reload
+            analyzers for
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return await self.transport.perform_request(
+            "GET",
+            _make_path(index, "_reload_search_analyzers"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "ignore_unavailable",
+        "include_defaults",
+        "include_type_name",
+        "local",
+    )
+    async def get_field_mapping(
+        self, fields, index=None, doc_type=None, params=None, headers=None
+    ):
+        """
+        Returns mapping for one or more fields.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-get-field-mapping.html>`_
+
+        :arg fields: A comma-separated list of fields
+        :arg index: A comma-separated list of index names
+        :arg doc_type: A comma-separated list of document types
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg include_defaults: Whether the default mapping values should
+            be returned as well
+        :arg include_type_name: Whether a type should be returned in the
+            body of the mappings.
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
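+
+        A minimal usage sketch (assumes an ``AsyncElasticsearch`` instance
+        named ``es``; the field and index names are hypothetical)::
+
+            resp = await es.indices.get_field_mapping(
+                fields="message,level", index="logs-2020"
+            )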
+        """
+        if fields in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'fields'.")
+
+        return await self.transport.perform_request(
+            "GET",
+            _make_path(index, "_mapping", doc_type, "field", fields),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "all_shards",
+        "allow_no_indices",
+        "analyze_wildcard",
+        "analyzer",
+        "default_operator",
+        "df",
+        "expand_wildcards",
+        "explain",
+        "ignore_unavailable",
+        "lenient",
+        "q",
+        "rewrite",
+    )
+    async def validate_query(
+        self, body=None, index=None, doc_type=None, params=None, headers=None
+    ):
+        """
+        Allows a user to validate a potentially expensive query without executing it.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/search-validate.html>`_
+
+        :arg body: The query definition specified with the Query DSL
+        :arg index: A comma-separated list of index names to restrict
+            the operation; use `_all` or empty string to perform the operation on
+            all indices
+        :arg doc_type: A comma-separated list of document types to
+            restrict the operation; leave empty to perform the operation on all
+            types
+        :arg all_shards: Execute validation on all shards instead of one
+            random shard per index
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg analyze_wildcard: Specify whether wildcard and prefix
+            queries should be analyzed (default: false)
+        :arg analyzer: The analyzer to use for the query string
+        :arg default_operator: The default operator for query string
+            query (AND or OR)  Valid choices: AND, OR  Default: OR
+        :arg df: The field to use as default where no field prefix is
+            given in the query string
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg explain: Return detailed information about the error
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg lenient: Specify whether format-based query failures (such
+            as providing text to a numeric field) should be ignored
+        :arg q: Query in the Lucene query string syntax
+        :arg rewrite: Provide a more detailed explanation showing the
+            actual Lucene query that will be executed.
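+
+        A minimal usage sketch (assumes an ``AsyncElasticsearch`` instance
+        named ``es``; the index name and query are hypothetical)::
+
+            resp = await es.indices.validate_query(
+                index="logs-2020",
+                body={"query": {"match": {"message": "error"}}},
+                explain=True,
+            )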
+        """
+        return await self.transport.perform_request(
+            "POST",
+            _make_path(index, doc_type, "_validate", "query"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    async def create_data_stream(self, name, body, params=None, headers=None):
+        """
+        Creates or updates a data stream.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/data-streams.html>`_
+
+        :arg name: The name of the data stream
+        :arg body: The data stream definition
+        """
+        for param in (name, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_data_stream", name),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    async def delete_data_stream(self, name, params=None, headers=None):
+        """
+        Deletes a data stream.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/data-streams.html>`_
+
+        :arg name: The name of the data stream
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return await self.transport.perform_request(
+            "DELETE", _make_path("_data_stream", name), params=params, headers=headers
+        )
+
+    @query_params()
+    async def get_data_streams(self, name=None, params=None, headers=None):
+        """
+        Returns data streams.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/data-streams.html>`_
+
+        :arg name: The name or wildcard expression of the requested data
+            streams
+        """
+        return await self.transport.perform_request(
+            "GET", _make_path("_data_streams", name), params=params, headers=headers
+        )
+
+    @query_params("master_timeout", "timeout")
+    async def delete_index_template(self, name, params=None, headers=None):
+        """
+        Deletes an index template.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-templates.html>`_
+
+        :arg name: The name of the template
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return await self.transport.perform_request(
+            "DELETE",
+            _make_path("_index_template", name),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("flat_settings", "local", "master_timeout")
+    async def get_index_template(self, name=None, params=None, headers=None):
+        """
+        Returns an index template.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-templates.html>`_
+
+        :arg name: The comma-separated names of the index templates
+        :arg flat_settings: Return settings in flat format (default:
+            false)
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        """
+        return await self.transport.perform_request(
+            "GET", _make_path("_index_template", name), params=params, headers=headers
+        )
+
+    @query_params("cause", "create", "master_timeout")
+    async def put_index_template(self, name, body, params=None, headers=None):
+        """
+        Creates or updates an index template.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-templates.html>`_
+
+        :arg name: The name of the template
+        :arg body: The template definition
+        :arg cause: User defined reason for creating/updating the index
+            template
+        :arg create: Whether the index template should only be added if
+            new or can also replace an existing one
+        :arg master_timeout: Specify timeout for connection to master
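+
+        A minimal usage sketch (assumes an ``AsyncElasticsearch`` instance
+        named ``es``; the template name and index pattern are hypothetical)::
+
+            await es.indices.put_index_template(
+                name="logs-template",
+                body={
+                    "index_patterns": ["logs-*"],
+                    "template": {"settings": {"number_of_shards": 1}},
+                },
+            )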
+        """
+        for param in (name, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_index_template", name),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("flat_settings", "local", "master_timeout")
+    async def exists_index_template(self, name, params=None, headers=None):
+        """
+        Returns information about whether a particular index template exists.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-templates.html>`_
+
+        :arg name: The name of the template
+        :arg flat_settings: Return settings in flat format (default:
+            false)
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return await self.transport.perform_request(
+            "HEAD", _make_path("_index_template", name), params=params, headers=headers
+        )
+
+    @query_params("cause", "create", "master_timeout")
+    async def simulate_index_template(self, name, body=None, params=None, headers=None):
+        """
+        Simulates matching the given index name against the index templates in the
+        system.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-templates.html>`_
+
+        :arg name: The name of the index (it must be a concrete index
+            name)
+        :arg body: New index template definition, which will be included
+            in the simulation, as if it already exists in the system
+        :arg cause: User defined reason for dry-run creating the new
+            template for simulation purposes
+        :arg create: Whether the index template we optionally defined in
+            the body should only be dry-run added if new or can also replace an
+            existing one
+        :arg master_timeout: Specify timeout for connection to master
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_index_template", "_simulate_index", name),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable")
+    async def flush_synced(self, index=None, params=None, headers=None):
+        """
+        Performs a synced flush operation on one or more indices. Synced flush is
+        deprecated and will be removed in 8.0. Use flush instead.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-synced-flush-api.html>`_
+
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string for all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, none, all  Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        """
+        return await self.transport.perform_request(
+            "POST",
+            _make_path(index, "_flush", "synced"),
+            params=params,
+            headers=headers,
+        )
diff --git a/elasticsearch_7/_async/client/ingest.py b/elasticsearch_7/_async/client/ingest.py
new file mode 100644
index 0000000000000000000000000000000000000000..94a87aa342aea5831dc5e79ca236fdfc38bdb610
--- /dev/null
+++ b/elasticsearch_7/_async/client/ingest.py
@@ -0,0 +1,99 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class IngestClient(NamespacedClient):
+    @query_params("master_timeout")
+    async def get_pipeline(self, id=None, params=None, headers=None):
+        """
+        Returns a pipeline.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/get-pipeline-api.html>`_
+
+        :arg id: Comma-separated list of pipeline ids. Wildcards
+            supported
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        """
+        return await self.transport.perform_request(
+            "GET", _make_path("_ingest", "pipeline", id), params=params, headers=headers
+        )
+
+    @query_params("master_timeout", "timeout")
+    async def put_pipeline(self, id, body, params=None, headers=None):
+        """
+        Creates or updates a pipeline.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/put-pipeline-api.html>`_
+
+        :arg id: Pipeline ID
+        :arg body: The ingest definition
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg timeout: Explicit operation timeout
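+
+        A minimal usage sketch (assumes an ``AsyncElasticsearch`` instance
+        named ``es``; the pipeline id and processor are hypothetical)::
+
+            await es.ingest.put_pipeline(
+                id="trim-message",
+                body={
+                    "description": "trim whitespace from the message field",
+                    "processors": [{"trim": {"field": "message"}}],
+                },
+            )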
+        """
+        for param in (id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_ingest", "pipeline", id),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("master_timeout", "timeout")
+    async def delete_pipeline(self, id, params=None, headers=None):
+        """
+        Deletes a pipeline.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/delete-pipeline-api.html>`_
+
+        :arg id: Pipeline ID
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg timeout: Explicit operation timeout
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'id'.")
+
+        return await self.transport.perform_request(
+            "DELETE",
+            _make_path("_ingest", "pipeline", id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("verbose")
+    async def simulate(self, body, id=None, params=None, headers=None):
+        """
+        Allows you to simulate a pipeline with example documents.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/simulate-pipeline-api.html>`_
+
+        :arg body: The simulate definition
+        :arg id: Pipeline ID
+        :arg verbose: Verbose mode. Display data output for each
+            processor in executed pipeline
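+
+        A minimal usage sketch (assumes an ``AsyncElasticsearch`` instance
+        named ``es``; the pipeline and sample document are hypothetical)::
+
+            resp = await es.ingest.simulate(
+                body={
+                    "pipeline": {"processors": [{"lowercase": {"field": "level"}}]},
+                    "docs": [{"_source": {"level": "WARN"}}],
+                },
+                verbose=True,
+            )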
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_ingest", "pipeline", id, "_simulate"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    async def processor_grok(self, params=None, headers=None):
+        """
+        Returns a list of the built-in patterns.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/grok-processor.html#grok-processor-rest-get>`_
+        """
+        return await self.transport.perform_request(
+            "GET", "/_ingest/processor/grok", params=params, headers=headers
+        )
diff --git a/elasticsearch_7/_async/client/license.py b/elasticsearch_7/_async/client/license.py
new file mode 100644
index 0000000000000000000000000000000000000000..19eb55380c65cf52b35690302660d765f76355cb
--- /dev/null
+++ b/elasticsearch_7/_async/client/license.py
@@ -0,0 +1,98 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params
+
+
+class LicenseClient(NamespacedClient):
+    @query_params()
+    async def delete(self, params=None, headers=None):
+        """
+        Deletes licensing information for the cluster.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/delete-license.html>`_
+        """
+        return await self.transport.perform_request(
+            "DELETE", "/_license", params=params, headers=headers
+        )
+
+    @query_params("accept_enterprise", "local")
+    async def get(self, params=None, headers=None):
+        """
+        Retrieves licensing information for the cluster.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/get-license.html>`_
+
+        :arg accept_enterprise: If the active license is an enterprise
+            license, return type as 'enterprise' (default: false)
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        """
+        return await self.transport.perform_request(
+            "GET", "/_license", params=params, headers=headers
+        )
+
+    @query_params()
+    async def get_basic_status(self, params=None, headers=None):
+        """
+        Retrieves information about the status of the basic license.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/get-basic-status.html>`_
+        """
+        return await self.transport.perform_request(
+            "GET", "/_license/basic_status", params=params, headers=headers
+        )
+
+    @query_params()
+    async def get_trial_status(self, params=None, headers=None):
+        """
+        Retrieves information about the status of the trial license.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/get-trial-status.html>`_
+        """
+        return await self.transport.perform_request(
+            "GET", "/_license/trial_status", params=params, headers=headers
+        )
+
+    @query_params("acknowledge")
+    async def post(self, body=None, params=None, headers=None):
+        """
+        Updates the license for the cluster.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/update-license.html>`_
+
+        :arg body: licenses to be installed
+        :arg acknowledge: whether the user has acknowledged the
+            acknowledge messages (default: false)
+        """
+        return await self.transport.perform_request(
+            "PUT", "/_license", params=params, headers=headers, body=body
+        )
+
+    @query_params("acknowledge")
+    async def post_start_basic(self, params=None, headers=None):
+        """
+        Starts an indefinite basic license.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/start-basic.html>`_
+
+        :arg acknowledge: whether the user has acknowledged the
+            acknowledge messages (default: false)
+        """
+        return await self.transport.perform_request(
+            "POST", "/_license/start_basic", params=params, headers=headers
+        )
+
+    @query_params("acknowledge", "doc_type")
+    async def post_start_trial(self, params=None, headers=None):
+        """
+        Starts a limited time trial license.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/start-trial.html>`_
+
+        :arg acknowledge: whether the user has acknowledged the
+            acknowledge messages (default: false)
+        :arg doc_type: The type of trial license to generate (default:
+            "trial")
+        """
+        # type is a reserved word so it cannot be used, use doc_type instead
+        if "doc_type" in params:
+            params["type"] = params.pop("doc_type")
+
+        return await self.transport.perform_request(
+            "POST", "/_license/start_trial", params=params, headers=headers
+        )
diff --git a/elasticsearch_7/_async/client/migration.py b/elasticsearch_7/_async/client/migration.py
new file mode 100644
index 0000000000000000000000000000000000000000..793c028ff232da93af0235b4269df2040d0cc09b
--- /dev/null
+++ b/elasticsearch_7/_async/client/migration.py
@@ -0,0 +1,24 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path
+
+
+class MigrationClient(NamespacedClient):
+    @query_params()
+    async def deprecations(self, index=None, params=None, headers=None):
+        """
+        Retrieves information about different cluster, node, and index level settings
+        that use deprecated features that will be removed or changed in the next major
+        version.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/migration-api-deprecation.html>`_
+
+        :arg index: Index pattern
+        """
+        return await self.transport.perform_request(
+            "GET",
+            _make_path(index, "_migration", "deprecations"),
+            params=params,
+            headers=headers,
+        )
diff --git a/elasticsearch_7/_async/client/ml.py b/elasticsearch_7/_async/client/ml.py
new file mode 100644
index 0000000000000000000000000000000000000000..2acba529aa941e6531d0a01fe8df58c9d92743d8
--- /dev/null
+++ b/elasticsearch_7/_async/client/ml.py
@@ -0,0 +1,1502 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH, _bulk_body
+
+
+class MlClient(NamespacedClient):
+    @query_params("allow_no_jobs", "force", "timeout")
+    async def close_job(self, job_id, body=None, params=None, headers=None):
+        """
+        Closes one or more anomaly detection jobs. A job can be opened and closed
+        multiple times throughout its lifecycle.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-close-job.html>`_
+
+        :arg job_id: The name of the job to close
+        :arg body: The URL params optionally sent in the body
+        :arg allow_no_jobs: Whether to ignore if a wildcard expression
+            matches no jobs. (This includes `_all` string or when no jobs have been
+            specified)
+        :arg force: True if the job should be forcefully closed
+        :arg timeout: Controls the time to wait until a job has closed.
+            Defaults to 30 minutes
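+
+        A minimal usage sketch (assumes an ``AsyncElasticsearch`` instance
+        named ``es``; the job id is hypothetical)::
+
+            await es.ml.close_job(job_id="my-anomaly-job", timeout="5m")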
+        """
+        if job_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'job_id'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "anomaly_detectors", job_id, "_close"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    async def delete_calendar(self, calendar_id, params=None, headers=None):
+        """
+        Deletes a calendar.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-delete-calendar.html>`_
+
+        :arg calendar_id: The ID of the calendar to delete
+        """
+        if calendar_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for a required argument 'calendar_id'."
+            )
+
+        return await self.transport.perform_request(
+            "DELETE",
+            _make_path("_ml", "calendars", calendar_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    async def delete_calendar_event(
+        self, calendar_id, event_id, params=None, headers=None
+    ):
+        """
+        Deletes scheduled events from a calendar.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-delete-calendar-event.html>`_
+
+        :arg calendar_id: The ID of the calendar to modify
+        :arg event_id: The ID of the event to remove from the calendar
+        """
+        for param in (calendar_id, event_id):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "DELETE",
+            _make_path("_ml", "calendars", calendar_id, "events", event_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    async def delete_calendar_job(self, calendar_id, job_id, params=None, headers=None):
+        """
+        Deletes anomaly detection jobs from a calendar.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-delete-calendar-job.html>`_
+
+        :arg calendar_id: The ID of the calendar to modify
+        :arg job_id: The ID of the job to remove from the calendar
+        """
+        for param in (calendar_id, job_id):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "DELETE",
+            _make_path("_ml", "calendars", calendar_id, "jobs", job_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("force")
+    async def delete_datafeed(self, datafeed_id, params=None, headers=None):
+        """
+        Deletes an existing datafeed.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-delete-datafeed.html>`_
+
+        :arg datafeed_id: The ID of the datafeed to delete
+        :arg force: True if the datafeed should be forcefully deleted
+        """
+        if datafeed_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for a required argument 'datafeed_id'."
+            )
+
+        return await self.transport.perform_request(
+            "DELETE",
+            _make_path("_ml", "datafeeds", datafeed_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    async def delete_expired_data(self, body=None, params=None, headers=None):
+        """
+        Deletes expired and unused machine learning data.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-delete-expired-data.html>`_
+
+        :arg body: deleting expired data parameters
+        """
+        return await self.transport.perform_request(
+            "DELETE",
+            "/_ml/_delete_expired_data",
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    async def delete_filter(self, filter_id, params=None, headers=None):
+        """
+        Deletes a filter.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-delete-filter.html>`_
+
+        :arg filter_id: The ID of the filter to delete
+        """
+        if filter_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'filter_id'.")
+
+        return await self.transport.perform_request(
+            "DELETE",
+            _make_path("_ml", "filters", filter_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("allow_no_forecasts", "timeout")
+    async def delete_forecast(
+        self, job_id, forecast_id=None, params=None, headers=None
+    ):
+        """
+        Deletes forecasts from a machine learning job.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-delete-forecast.html>`_
+
+        :arg job_id: The ID of the job from which to delete forecasts
+        :arg forecast_id: The ID of the forecast to delete; can be a
+            comma-delimited list. Leaving blank implies `_all`
+        :arg allow_no_forecasts: Whether to ignore if `_all` matches no
+            forecasts
+        :arg timeout: Controls the time to wait until the forecast(s)
+            are deleted. Defaults to 30 seconds
+        """
+        if job_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'job_id'.")
+
+        return await self.transport.perform_request(
+            "DELETE",
+            _make_path("_ml", "anomaly_detectors", job_id, "_forecast", forecast_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("force", "wait_for_completion")
+    async def delete_job(self, job_id, params=None, headers=None):
+        """
+        Deletes an existing anomaly detection job.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-delete-job.html>`_
+
+        :arg job_id: The ID of the job to delete
+        :arg force: True if the job should be forcefully deleted
+        :arg wait_for_completion: Should this request wait until the
+            operation has completed before returning?  Default: True
+        """
+        if job_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'job_id'.")
+
+        return await self.transport.perform_request(
+            "DELETE",
+            _make_path("_ml", "anomaly_detectors", job_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    async def delete_model_snapshot(
+        self, job_id, snapshot_id, params=None, headers=None
+    ):
+        """
+        Deletes an existing model snapshot.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-delete-snapshot.html>`_
+
+        :arg job_id: The ID of the job to fetch
+        :arg snapshot_id: The ID of the snapshot to delete
+        """
+        for param in (job_id, snapshot_id):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "DELETE",
+            _make_path(
+                "_ml", "anomaly_detectors", job_id, "model_snapshots", snapshot_id
+            ),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "charset",
+        "column_names",
+        "delimiter",
+        "explain",
+        "format",
+        "grok_pattern",
+        "has_header_row",
+        "line_merge_size_limit",
+        "lines_to_sample",
+        "quote",
+        "should_trim_fields",
+        "timeout",
+        "timestamp_field",
+        "timestamp_format",
+    )
+    async def find_file_structure(self, body, params=None, headers=None):
+        """
+        Finds the structure of a text file. The text file must contain data that is
+        suitable to be ingested into Elasticsearch.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-find-file-structure.html>`_
+
+        :arg body: The contents of the file to be analyzed
+        :arg charset: Optional parameter to specify the character set of
+            the file
+        :arg column_names: Optional parameter containing a comma
+            separated list of the column names for a delimited file
+        :arg delimiter: Optional parameter to specify the delimiter
+            character for a delimited file - must be a single character
+        :arg explain: Whether to include a commentary on how the
+            structure was derived
+        :arg format: Optional parameter to specify the high-level file
+            format  Valid choices: ndjson, xml, delimited, semi_structured_text
+        :arg grok_pattern: Optional parameter to specify the Grok
+            pattern that should be used to extract fields from messages in a semi-
+            structured text file
+        :arg has_header_row: Optional parameter to specify whether a
+            delimited file includes the column names in its first row
+        :arg line_merge_size_limit: Maximum number of characters
+            permitted in a single message when lines are merged to create messages.
+            Default: 10000
+        :arg lines_to_sample: How many lines of the file should be
+            included in the analysis  Default: 1000
+        :arg quote: Optional parameter to specify the quote character
+            for a delimited file - must be a single character
+        :arg should_trim_fields: Optional parameter to specify whether
+            the values between delimiters in a delimited file should have whitespace
+            trimmed from them
+        :arg timeout: Timeout after which the analysis will be aborted
+            Default: 25s
+        :arg timestamp_field: Optional parameter to specify the
+            timestamp field in the file
+        :arg timestamp_format: Optional parameter to specify the
+            timestamp format in the file - may be either a Joda or Java time format
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        body = _bulk_body(self.transport.serializer, body)
+        return await self.transport.perform_request(
+            "POST",
+            "/_ml/find_file_structure",
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
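+    # Illustrative usage (not part of the generated client): assuming `es` is an
+    # AsyncElasticsearch instance, the body is the raw text to analyze, e.g. a few
+    # hundred NDJSON lines read from disk (the file name is hypothetical):
+    #
+    #     sample = open("events.ndjson").read()
+    #     structure = await es.ml.find_file_structure(
+    #         body=sample, format="ndjson", lines_to_sample=500, explain=True
+    #     )
+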
+    @query_params("advance_time", "calc_interim", "end", "skip_time", "start")
+    async def flush_job(self, job_id, body=None, params=None, headers=None):
+        """
+        Forces any buffered data to be processed by the job.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-flush-job.html>`_
+
+        :arg job_id: The name of the job to flush
+        :arg body: Flush parameters
+        :arg advance_time: Advances time to the given value generating
+            results and updating the model for the advanced interval
+        :arg calc_interim: Calculates interim results for the most
+            recent bucket or all buckets within the latency period
+        :arg end: When used in conjunction with calc_interim, specifies
+            the range of buckets on which to calculate interim results
+        :arg skip_time: Skips time to the given value without generating
+            results or updating the model for the skipped interval
+        :arg start: When used in conjunction with calc_interim,
+            specifies the range of buckets on which to calculate interim results
+        """
+        if job_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'job_id'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "anomaly_detectors", job_id, "_flush"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
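+    # Illustrative usage (not part of the generated client): assuming `es` is an
+    # AsyncElasticsearch instance and "my-job" is an existing open anomaly
+    # detection job, interim results for the latest buckets can be calculated with:
+    #
+    #     await es.ml.flush_job(job_id="my-job", calc_interim=True)
+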
+    @query_params("duration", "expires_in")
+    async def forecast(self, job_id, params=None, headers=None):
+        """
+        Predicts the future behavior of a time series by using its historical behavior.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-forecast.html>`_
+
+        :arg job_id: The ID of the job to forecast for
+        :arg duration: The duration of the forecast
+        :arg expires_in: The time interval after which the forecast
+            expires. Expired forecasts will be deleted at the first opportunity.
+        """
+        if job_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'job_id'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "anomaly_detectors", job_id, "_forecast"),
+            params=params,
+            headers=headers,
+        )
+
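+    # Illustrative usage (not part of the generated client): assuming `es` is an
+    # AsyncElasticsearch instance, a one-day forecast that is kept for a week
+    # could be requested as:
+    #
+    #     await es.ml.forecast(job_id="my-job", duration="1d", expires_in="7d")
+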
+    @query_params(
+        "anomaly_score",
+        "desc",
+        "end",
+        "exclude_interim",
+        "expand",
+        "from_",
+        "size",
+        "sort",
+        "start",
+    )
+    async def get_buckets(
+        self, job_id, body=None, timestamp=None, params=None, headers=None
+    ):
+        """
+        Retrieves anomaly detection job results for one or more buckets.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-get-bucket.html>`_
+
+        :arg job_id: ID of the job to get bucket results from
+        :arg body: Bucket selection details if not provided in URI
+        :arg timestamp: The timestamp of the desired single bucket
+            result
+        :arg anomaly_score: Filter for the most anomalous buckets
+        :arg desc: Set the sort direction
+        :arg end: End time filter for buckets
+        :arg exclude_interim: Exclude interim results
+        :arg expand: Include anomaly records
+        :arg from_: skips a number of buckets
+        :arg size: specifies a max number of buckets to get
+        :arg sort: Sort buckets by a particular field
+        :arg start: Start time filter for buckets
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        if job_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'job_id'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path(
+                "_ml", "anomaly_detectors", job_id, "results", "buckets", timestamp
+            ),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
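+    # Illustrative usage (not part of the generated client): assuming `es` is an
+    # AsyncElasticsearch instance, the ten most recent buckets with an anomaly
+    # score of at least 75 could be fetched as (note that `from_` is sent as `from`):
+    #
+    #     resp = await es.ml.get_buckets(
+    #         job_id="my-job", anomaly_score=75, sort="timestamp", desc=True,
+    #         from_=0, size=10, exclude_interim=True,
+    #     )
+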
+    @query_params("end", "from_", "job_id", "size", "start")
+    async def get_calendar_events(self, calendar_id, params=None, headers=None):
+        """
+        Retrieves information about the scheduled events in calendars.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-get-calendar-event.html>`_
+
+        :arg calendar_id: The ID of the calendar containing the events
+        :arg end: Get events before this time
+        :arg from_: Skips a number of events
+        :arg job_id: Get events for the job. When this option is used
+            calendar_id must be '_all'
+        :arg size: Specifies a max number of events to get
+        :arg start: Get events after this time
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        if calendar_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for a required argument 'calendar_id'."
+            )
+
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_ml", "calendars", calendar_id, "events"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("from_", "size")
+    async def get_calendars(
+        self, body=None, calendar_id=None, params=None, headers=None
+    ):
+        """
+        Retrieves configuration information for calendars.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-get-calendar.html>`_
+
+        :arg body: The from and size parameters optionally sent in the
+            body
+        :arg calendar_id: The ID of the calendar to fetch
+        :arg from_: skips a number of calendars
+        :arg size: specifies a max number of calendars to get
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "calendars", calendar_id),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("allow_no_datafeeds")
+    async def get_datafeed_stats(self, datafeed_id=None, params=None, headers=None):
+        """
+        Retrieves usage information for datafeeds.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-get-datafeed-stats.html>`_
+
+        :arg datafeed_id: The ID of the datafeeds stats to fetch
+        :arg allow_no_datafeeds: Whether to ignore if a wildcard
+            expression matches no datafeeds. (This includes `_all` string or when no
+            datafeeds have been specified)
+        """
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_ml", "datafeeds", datafeed_id, "_stats"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("allow_no_datafeeds")
+    async def get_datafeeds(self, datafeed_id=None, params=None, headers=None):
+        """
+        Retrieves configuration information for datafeeds.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-get-datafeed.html>`_
+
+        :arg datafeed_id: The ID of the datafeeds to fetch
+        :arg allow_no_datafeeds: Whether to ignore if a wildcard
+            expression matches no datafeeds. (This includes `_all` string or when no
+            datafeeds have been specified)
+        """
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_ml", "datafeeds", datafeed_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("from_", "size")
+    async def get_filters(self, filter_id=None, params=None, headers=None):
+        """
+        Retrieves filters.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-get-filter.html>`_
+
+        :arg filter_id: The ID of the filter to fetch
+        :arg from_: skips a number of filters
+        :arg size: specifies a max number of filters to get
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_ml", "filters", filter_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "desc",
+        "end",
+        "exclude_interim",
+        "from_",
+        "influencer_score",
+        "size",
+        "sort",
+        "start",
+    )
+    async def get_influencers(self, job_id, body=None, params=None, headers=None):
+        """
+        Retrieves anomaly detection job results for one or more influencers.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-get-influencer.html>`_
+
+        :arg job_id: Identifier for the anomaly detection job
+        :arg body: Influencer selection criteria
+        :arg desc: whether the results should be sorted in descending
+            order
+        :arg end: end timestamp for the requested influencers
+        :arg exclude_interim: Exclude interim results
+        :arg from_: skips a number of influencers
+        :arg influencer_score: influencer score threshold for the
+            requested influencers
+        :arg size: specifies a max number of influencers to get
+        :arg sort: sort field for the requested influencers
+        :arg start: start timestamp for the requested influencers
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        if job_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'job_id'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "anomaly_detectors", job_id, "results", "influencers"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("allow_no_jobs")
+    async def get_job_stats(self, job_id=None, params=None, headers=None):
+        """
+        Retrieves usage information for anomaly detection jobs.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-get-job-stats.html>`_
+
+        :arg job_id: The ID of the jobs stats to fetch
+        :arg allow_no_jobs: Whether to ignore if a wildcard expression
+            matches no jobs. (This includes `_all` string or when no jobs have been
+            specified)
+        """
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_ml", "anomaly_detectors", job_id, "_stats"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("allow_no_jobs")
+    async def get_jobs(self, job_id=None, params=None, headers=None):
+        """
+        Retrieves configuration information for anomaly detection jobs.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-get-job.html>`_
+
+        :arg job_id: The ID of the jobs to fetch
+        :arg allow_no_jobs: Whether to ignore if a wildcard expression
+            matches no jobs. (This includes `_all` string or when no jobs have been
+            specified)
+        """
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_ml", "anomaly_detectors", job_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "allow_no_jobs",
+        "bucket_span",
+        "end",
+        "exclude_interim",
+        "overall_score",
+        "start",
+        "top_n",
+    )
+    async def get_overall_buckets(self, job_id, body=None, params=None, headers=None):
+        """
+        Retrieves overall bucket results that summarize the bucket results of multiple
+        anomaly detection jobs.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-get-overall-buckets.html>`_
+
+        :arg job_id: The job IDs for which to calculate overall bucket
+            results
+        :arg body: Overall bucket selection details if not provided in
+            URI
+        :arg allow_no_jobs: Whether to ignore if a wildcard expression
+            matches no jobs. (This includes `_all` string or when no jobs have been
+            specified)
+        :arg bucket_span: The span of the overall buckets. Defaults to
+            the longest job bucket_span
+        :arg end: Returns overall buckets with timestamps earlier than
+            this time
+        :arg exclude_interim: If true overall buckets that include
+            interim buckets will be excluded
+        :arg overall_score: Returns overall buckets with overall scores
+            higher than this value
+        :arg start: Returns overall buckets with timestamps after this
+            time
+        :arg top_n: The number of top job bucket scores to be used in
+            the overall_score calculation
+        """
+        if job_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'job_id'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path(
+                "_ml", "anomaly_detectors", job_id, "results", "overall_buckets"
+            ),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
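+    # Illustrative usage (not part of the generated client): assuming `es` is an
+    # AsyncElasticsearch instance, overall buckets spanning one hour and scoring
+    # at least 50 across the jobs matching "my-jobs-*" could be fetched as:
+    #
+    #     resp = await es.ml.get_overall_buckets(
+    #         job_id="my-jobs-*", bucket_span="1h", overall_score=50, top_n=2
+    #     )
+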
+    @query_params(
+        "desc",
+        "end",
+        "exclude_interim",
+        "from_",
+        "record_score",
+        "size",
+        "sort",
+        "start",
+    )
+    async def get_records(self, job_id, body=None, params=None, headers=None):
+        """
+        Retrieves anomaly records for an anomaly detection job.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-get-record.html>`_
+
+        :arg job_id: The ID of the job
+        :arg body: Record selection criteria
+        :arg desc: Set the sort direction
+        :arg end: End time filter for records
+        :arg exclude_interim: Exclude interim results
+        :arg from_: skips a number of records
+        :arg record_score: Returns records with anomaly scores greater
+            or equal than this value
+        :arg size: specifies a max number of records to get
+        :arg sort: Sort records by a particular field
+        :arg start: Start time filter for records
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        if job_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'job_id'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "anomaly_detectors", job_id, "results", "records"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
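+    # Illustrative usage (not part of the generated client): assuming `es` is an
+    # AsyncElasticsearch instance, the highest-scoring anomaly records of a job
+    # could be listed as:
+    #
+    #     resp = await es.ml.get_records(
+    #         job_id="my-job", record_score=80, sort="record_score", desc=True
+    #     )
+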
+    @query_params()
+    async def info(self, params=None, headers=None):
+        """
+        Returns defaults and limits used by machine learning.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/get-ml-info.html>`_
+        """
+        return await self.transport.perform_request(
+            "GET", "/_ml/info", params=params, headers=headers
+        )
+
+    @query_params()
+    async def open_job(self, job_id, params=None, headers=None):
+        """
+        Opens one or more anomaly detection jobs.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-open-job.html>`_
+
+        :arg job_id: The ID of the job to open
+        """
+        if job_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'job_id'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "anomaly_detectors", job_id, "_open"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    async def post_calendar_events(self, calendar_id, body, params=None, headers=None):
+        """
+        Posts scheduled events in a calendar.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-post-calendar-event.html>`_
+
+        :arg calendar_id: The ID of the calendar to modify
+        :arg body: A list of events
+        """
+        for param in (calendar_id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "calendars", calendar_id, "events"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("reset_end", "reset_start")
+    async def post_data(self, job_id, body, params=None, headers=None):
+        """
+        Sends data to an anomaly detection job for analysis.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-post-data.html>`_
+
+        :arg job_id: The name of the job receiving the data
+        :arg body: The data to process
+        :arg reset_end: Optional parameter to specify the end of the
+            bucket resetting range
+        :arg reset_start: Optional parameter to specify the start of the
+            bucket resetting range
+        """
+        for param in (job_id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        body = _bulk_body(self.transport.serializer, body)
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "anomaly_detectors", job_id, "_data"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
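+    # Illustrative usage (not part of the generated client): assuming `es` is an
+    # AsyncElasticsearch instance and the job is open, a list of documents is
+    # serialized to newline-delimited JSON by `_bulk_body` before being sent:
+    #
+    #     docs = [{"timestamp": 1570300800000, "responsetime": 12.3}]
+    #     await es.ml.post_data(job_id="my-job", body=docs)
+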
+    @query_params()
+    async def preview_datafeed(self, datafeed_id, params=None, headers=None):
+        """
+        Previews a datafeed.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-preview-datafeed.html>`_
+
+        :arg datafeed_id: The ID of the datafeed to preview
+        """
+        if datafeed_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for a required argument 'datafeed_id'."
+            )
+
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_ml", "datafeeds", datafeed_id, "_preview"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    async def put_calendar(self, calendar_id, body=None, params=None, headers=None):
+        """
+        Instantiates a calendar.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-put-calendar.html>`_
+
+        :arg calendar_id: The ID of the calendar to create
+        :arg body: The calendar details
+        """
+        if calendar_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for a required argument 'calendar_id'."
+            )
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_ml", "calendars", calendar_id),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    async def put_calendar_job(self, calendar_id, job_id, params=None, headers=None):
+        """
+        Adds an anomaly detection job to a calendar.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-put-calendar-job.html>`_
+
+        :arg calendar_id: The ID of the calendar to modify
+        :arg job_id: The ID of the job to add to the calendar
+        """
+        for param in (calendar_id, job_id):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_ml", "calendars", calendar_id, "jobs", job_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "allow_no_indices", "expand_wildcards", "ignore_throttled", "ignore_unavailable"
+    )
+    async def put_datafeed(self, datafeed_id, body, params=None, headers=None):
+        """
+        Instantiates a datafeed.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-put-datafeed.html>`_
+
+        :arg datafeed_id: The ID of the datafeed to create
+        :arg body: The datafeed config
+        :arg allow_no_indices: Ignore if the source indices expressions
+            resolve to no concrete indices (default: true)
+        :arg expand_wildcards: Whether source index expressions should
+            get expanded to open or closed indices (default: open)  Valid choices:
+            open, closed, hidden, none, all
+        :arg ignore_throttled: Ignore indices that are marked as
+            throttled (default: true)
+        :arg ignore_unavailable: Ignore unavailable indexes (default:
+            false)
+        """
+        for param in (datafeed_id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_ml", "datafeeds", datafeed_id),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
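+    # Illustrative usage (not part of the generated client): assuming `es` is an
+    # AsyncElasticsearch instance, a minimal datafeed config (index and job names
+    # are hypothetical) might look like:
+    #
+    #     await es.ml.put_datafeed(
+    #         datafeed_id="datafeed-my-job",
+    #         body={"job_id": "my-job", "indices": ["server-metrics"]},
+    #     )
+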
+    @query_params()
+    async def put_filter(self, filter_id, body, params=None, headers=None):
+        """
+        Instantiates a filter.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-put-filter.html>`_
+
+        :arg filter_id: The ID of the filter to create
+        :arg body: The filter details
+        """
+        for param in (filter_id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_ml", "filters", filter_id),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    async def put_job(self, job_id, body, params=None, headers=None):
+        """
+        Instantiates an anomaly detection job.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-put-job.html>`_
+
+        :arg job_id: The ID of the job to create
+        :arg body: The job
+        """
+        for param in (job_id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_ml", "anomaly_detectors", job_id),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
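+    # Illustrative usage (not part of the generated client): assuming `es` is an
+    # AsyncElasticsearch instance, a minimal anomaly detection job config might
+    # look like (field and job names are hypothetical):
+    #
+    #     await es.ml.put_job(
+    #         job_id="my-job",
+    #         body={
+    #             "analysis_config": {
+    #                 "bucket_span": "15m",
+    #                 "detectors": [{"function": "mean", "field_name": "responsetime"}],
+    #             },
+    #             "data_description": {"time_field": "timestamp"},
+    #         },
+    #     )
+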
+    @query_params("enabled", "timeout")
+    async def set_upgrade_mode(self, params=None, headers=None):
+        """
+        Sets a cluster wide upgrade_mode setting that prepares machine learning indices
+        for an upgrade.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-set-upgrade-mode.html>`_
+
+        :arg enabled: Whether to enable upgrade_mode ML setting or not.
+            Defaults to false.
+        :arg timeout: Controls the time to wait before action times out.
+            Defaults to 30 seconds
+        """
+        return await self.transport.perform_request(
+            "POST", "/_ml/set_upgrade_mode", params=params, headers=headers
+        )
+
+    @query_params("end", "start", "timeout")
+    async def start_datafeed(self, datafeed_id, body=None, params=None, headers=None):
+        """
+        Starts one or more datafeeds.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-start-datafeed.html>`_
+
+        :arg datafeed_id: The ID of the datafeed to start
+        :arg body: The start datafeed parameters
+        :arg end: The end time when the datafeed should stop. When not
+            set, the datafeed continues in real time
+        :arg start: The start time from where the datafeed should begin
+        :arg timeout: Controls the time to wait until a datafeed has
+            started. Defaults to 20 seconds
+        """
+        if datafeed_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for a required argument 'datafeed_id'."
+            )
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "datafeeds", datafeed_id, "_start"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
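+    # Illustrative usage (not part of the generated client): assuming `es` is an
+    # AsyncElasticsearch instance and the associated job is open, a datafeed can
+    # be started from a fixed point in time and left running in real time:
+    #
+    #     await es.ml.start_datafeed(
+    #         datafeed_id="datafeed-my-job", start="2020-01-01T00:00:00Z"
+    #     )
+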
+    @query_params("allow_no_datafeeds", "force", "timeout")
+    async def stop_datafeed(self, datafeed_id, params=None, headers=None):
+        """
+        Stops one or more datafeeds.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-stop-datafeed.html>`_
+
+        :arg datafeed_id: The ID of the datafeed to stop
+        :arg allow_no_datafeeds: Whether to ignore if a wildcard
+            expression matches no datafeeds. (This includes `_all` string or when no
+            datafeeds have been specified)
+        :arg force: True if the datafeed should be forcefully stopped.
+        :arg timeout: Controls the time to wait until a datafeed has
+            stopped. Defaults to 20 seconds
+        """
+        if datafeed_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for a required argument 'datafeed_id'."
+            )
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "datafeeds", datafeed_id, "_stop"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "allow_no_indices", "expand_wildcards", "ignore_throttled", "ignore_unavailable"
+    )
+    async def update_datafeed(self, datafeed_id, body, params=None, headers=None):
+        """
+        Updates certain properties of a datafeed.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-update-datafeed.html>`_
+
+        :arg datafeed_id: The ID of the datafeed to update
+        :arg body: The datafeed update settings
+        :arg allow_no_indices: Ignore if the source indices expressions
+            resolve to no concrete indices (default: true)
+        :arg expand_wildcards: Whether source index expressions should
+            get expanded to open or closed indices (default: open)  Valid choices:
+            open, closed, hidden, none, all
+        :arg ignore_throttled: Ignore indices that are marked as
+            throttled (default: true)
+        :arg ignore_unavailable: Ignore unavailable indexes (default:
+            false)
+        """
+        for param in (datafeed_id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "datafeeds", datafeed_id, "_update"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    async def update_filter(self, filter_id, body, params=None, headers=None):
+        """
+        Updates the description of a filter, adds items, or removes items.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-update-filter.html>`_
+
+        :arg filter_id: The ID of the filter to update
+        :arg body: The filter update
+        """
+        for param in (filter_id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "filters", filter_id, "_update"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    async def update_job(self, job_id, body, params=None, headers=None):
+        """
+        Updates certain properties of an anomaly detection job.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-update-job.html>`_
+
+        :arg job_id: The ID of the job to update
+        :arg body: The job update settings
+        """
+        for param in (job_id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "anomaly_detectors", job_id, "_update"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    async def validate(self, body, params=None, headers=None):
+        """
+        Validates an anomaly detection job.
+        `<https://www.elastic.co/guide/en/machine-learning/current/ml-jobs.html>`_
+
+        :arg body: The job config
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            "/_ml/anomaly_detectors/_validate",
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    async def validate_detector(self, body, params=None, headers=None):
+        """
+        Validates an anomaly detection detector.
+        `<https://www.elastic.co/guide/en/machine-learning/current/ml-jobs.html>`_
+
+        :arg body: The detector
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            "/_ml/anomaly_detectors/_validate/detector",
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("force", "timeout")
+    async def delete_data_frame_analytics(self, id, params=None, headers=None):
+        """
+        Deletes an existing data frame analytics job.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/delete-dfanalytics.html>`_
+
+        :arg id: The ID of the data frame analytics to delete
+        :arg force: True if the job should be forcefully deleted
+        :arg timeout: Controls the time to wait until a job is deleted.
+            Defaults to 1 minute
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'id'.")
+
+        return await self.transport.perform_request(
+            "DELETE",
+            _make_path("_ml", "data_frame", "analytics", id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    async def evaluate_data_frame(self, body, params=None, headers=None):
+        """
+        Evaluates the data frame analytics for an annotated index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/evaluate-dfanalytics.html>`_
+
+        :arg body: The evaluation definition
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            "/_ml/data_frame/_evaluate",
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("allow_no_match", "from_", "size")
+    async def get_data_frame_analytics(self, id=None, params=None, headers=None):
+        """
+        Retrieves configuration information for data frame analytics jobs.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/get-dfanalytics.html>`_
+
+        :arg id: The ID of the data frame analytics to fetch
+        :arg allow_no_match: Whether to ignore if a wildcard expression
+            matches no data frame analytics. (This includes `_all` string or when no
+            data frame analytics have been specified)  Default: True
+        :arg from_: skips a number of analytics
+        :arg size: specifies a max number of analytics to get  Default:
+            100
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_ml", "data_frame", "analytics", id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("allow_no_match", "from_", "size")
+    async def get_data_frame_analytics_stats(self, id=None, params=None, headers=None):
+        """
+        Retrieves usage information for data frame analytics jobs.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/get-dfanalytics-stats.html>`_
+
+        :arg id: The ID of the data frame analytics stats to fetch
+        :arg allow_no_match: Whether to ignore if a wildcard expression
+            matches no data frame analytics. (This includes `_all` string or when no
+            data frame analytics have been specified)  Default: True
+        :arg from_: skips a number of analytics
+        :arg size: specifies a max number of analytics to get  Default:
+            100
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_ml", "data_frame", "analytics", id, "_stats"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    async def put_data_frame_analytics(self, id, body, params=None, headers=None):
+        """
+        Instantiates a data frame analytics job.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/put-dfanalytics.html>`_
+
+        :arg id: The ID of the data frame analytics to create
+        :arg body: The data frame analytics configuration
+        """
+        for param in (id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_ml", "data_frame", "analytics", id),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
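+    # Illustrative usage (not part of the generated client): assuming `es` is an
+    # AsyncElasticsearch instance, a minimal outlier detection config (index names
+    # are hypothetical) might look like:
+    #
+    #     await es.ml.put_data_frame_analytics(
+    #         id="my-analytics",
+    #         body={
+    #             "source": {"index": "my-source-index"},
+    #             "dest": {"index": "my-dest-index"},
+    #             "analysis": {"outlier_detection": {}},
+    #         },
+    #     )
+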
+    @query_params("timeout")
+    async def start_data_frame_analytics(
+        self, id, body=None, params=None, headers=None
+    ):
+        """
+        Starts a data frame analytics job.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/start-dfanalytics.html>`_
+
+        :arg id: The ID of the data frame analytics to start
+        :arg body: The start data frame analytics parameters
+        :arg timeout: Controls the time to wait until the task has
+            started. Defaults to 20 seconds
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'id'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "data_frame", "analytics", id, "_start"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("allow_no_match", "force", "timeout")
+    async def stop_data_frame_analytics(self, id, body=None, params=None, headers=None):
+        """
+        Stops one or more data frame analytics jobs.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/stop-dfanalytics.html>`_
+
+        :arg id: The ID of the data frame analytics to stop
+        :arg body: The stop data frame analytics parameters
+        :arg allow_no_match: Whether to ignore if a wildcard expression
+            matches no data frame analytics. (This includes `_all` string or when no
+            data frame analytics have been specified)
+        :arg force: True if the data frame analytics should be
+            forcefully stopped
+        :arg timeout: Controls the time to wait until the task has
+            stopped. Defaults to 20 seconds
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'id'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "data_frame", "analytics", id, "_stop"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    async def delete_trained_model(self, model_id, params=None, headers=None):
+        """
+        Deletes an existing trained inference model that is currently not referenced by
+        an ingest pipeline.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/delete-inference.html>`_
+
+        :arg model_id: The ID of the trained model to delete
+        """
+        if model_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'model_id'.")
+
+        return await self.transport.perform_request(
+            "DELETE",
+            _make_path("_ml", "inference", model_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "allow_no_match",
+        "decompress_definition",
+        "from_",
+        "include_model_definition",
+        "size",
+        "tags",
+    )
+    async def get_trained_models(self, model_id=None, params=None, headers=None):
+        """
+        Retrieves configuration information for a trained inference model.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/get-inference.html>`_
+
+        :arg model_id: The ID of the trained models to fetch
+        :arg allow_no_match: Whether to ignore if a wildcard expression
+            matches no trained models. (This includes `_all` string or when no
+            trained models have been specified)  Default: True
+        :arg decompress_definition: Should the model definition be
+            decompressed into valid JSON or returned in a custom compressed
+            format?  Default: True
+        :arg from_: skips a number of trained models
+        :arg include_model_definition: Should the full model definition
+            be included in the results? These definitions can be large, so be
+            cautious when including them. Defaults to false.
+        :arg size: specifies a max number of trained models to get
+            Default: 100
+        :arg tags: A comma-separated list of tags that the model must
+            have.
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_ml", "inference", model_id),
+            params=params,
+            headers=headers,
+        )
+
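+    # Illustrative usage (not part of the generated client): assuming `es` is an
+    # AsyncElasticsearch instance, model metadata can be paged through without
+    # pulling the (potentially large) definitions:
+    #
+    #     resp = await es.ml.get_trained_models(
+    #         model_id="my-model", include_model_definition=False, from_=0, size=10
+    #     )
+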
+    @query_params("allow_no_match", "from_", "size")
+    async def get_trained_models_stats(self, model_id=None, params=None, headers=None):
+        """
+        Retrieves usage information for trained inference models.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/get-inference-stats.html>`_
+
+        :arg model_id: The ID of the trained models stats to fetch
+        :arg allow_no_match: Whether to ignore if a wildcard expression
+            matches no trained models. (This includes `_all` string or when no
+            trained models have been specified)  Default: True
+        :arg from_: skips a number of trained models
+        :arg size: specifies a max number of trained models to get
+            Default: 100
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_ml", "inference", model_id, "_stats"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    async def put_trained_model(self, model_id, body, params=None, headers=None):
+        """
+        Creates an inference trained model.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/put-inference.html>`_
+
+        :arg model_id: The ID of the trained models to store
+        :arg body: The trained model configuration
+        """
+        for param in (model_id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_ml", "inference", model_id),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    async def estimate_model_memory(self, body, params=None, headers=None):
+        """
+        Estimates the model memory.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-apis.html>`_
+
+        :arg body: The analysis config, plus cardinality estimates for
+            fields it references
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            "/_ml/anomaly_detectors/_estimate_model_memory",
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
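+    # Illustrative usage (not part of the generated client): assuming `es` is an
+    # AsyncElasticsearch instance, the body pairs an analysis config with
+    # cardinality estimates for the fields it references (field names are
+    # hypothetical; the exact body layout is described in the linked reference):
+    #
+    #     resp = await es.ml.estimate_model_memory(
+    #         body={
+    #             "analysis_config": {
+    #                 "bucket_span": "15m",
+    #                 "detectors": [
+    #                     {"function": "mean", "field_name": "responsetime",
+    #                      "by_field_name": "host"}
+    #                 ],
+    #             },
+    #             "overall_cardinality": {"host": 50},
+    #         },
+    #     )
+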
+    @query_params()
+    async def explain_data_frame_analytics(
+        self, body=None, id=None, params=None, headers=None
+    ):
+        """
+        Explains a data frame analytics config.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/explain-dfanalytics.html>`_
+
+        :arg body: The data frame analytics config to explain
+        :arg id: The ID of the data frame analytics to explain
+        """
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "data_frame", "analytics", id, "_explain"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("from_", "size")
+    async def get_categories(
+        self, job_id, body=None, category_id=None, params=None, headers=None
+    ):
+        """
+        Retrieves anomaly detection job results for one or more categories.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-get-category.html>`_
+
+        :arg job_id: The name of the job
+        :arg body: Category selection details if not provided in URI
+        :arg category_id: The identifier of the category definition of
+            interest
+        :arg from_: skips a number of categories
+        :arg size: specifies a max number of categories to get
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        if job_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'job_id'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path(
+                "_ml", "anomaly_detectors", job_id, "results", "categories", category_id
+            ),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("desc", "end", "from_", "size", "sort", "start")
+    async def get_model_snapshots(
+        self, job_id, body=None, snapshot_id=None, params=None, headers=None
+    ):
+        """
+        Retrieves information about model snapshots.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-get-snapshot.html>`_
+
+        :arg job_id: The ID of the job to fetch
+        :arg body: Model snapshot selection criteria
+        :arg snapshot_id: The ID of the snapshot to fetch
+        :arg desc: True if the results should be sorted in descending
+            order
+        :arg end: The filter 'end' query parameter
+        :arg from_: Skips a number of documents
+        :arg size: The default number of documents returned in queries
+            as a string.
+        :arg sort: Name of the field to sort on
+        :arg start: The filter 'start' query parameter
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        if job_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'job_id'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path(
+                "_ml", "anomaly_detectors", job_id, "model_snapshots", snapshot_id
+            ),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("delete_intervening_results")
+    async def revert_model_snapshot(
+        self, job_id, snapshot_id, body=None, params=None, headers=None
+    ):
+        """
+        Reverts to a specific snapshot.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-revert-snapshot.html>`_
+
+        :arg job_id: The ID of the job to fetch
+        :arg snapshot_id: The ID of the snapshot to revert to
+        :arg body: Reversion options
+        :arg delete_intervening_results: Should we reset the results
+            back to the time of the snapshot?
+        """
+        for param in (job_id, snapshot_id):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path(
+                "_ml",
+                "anomaly_detectors",
+                job_id,
+                "model_snapshots",
+                snapshot_id,
+                "_revert",
+            ),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    async def update_model_snapshot(
+        self, job_id, snapshot_id, body, params=None, headers=None
+    ):
+        """
+        Updates certain properties of a snapshot.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-update-snapshot.html>`_
+
+        :arg job_id: The ID of the job to fetch
+        :arg snapshot_id: The ID of the snapshot to update
+        :arg body: The model snapshot properties to update
+        """
+        for param in (job_id, snapshot_id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path(
+                "_ml",
+                "anomaly_detectors",
+                job_id,
+                "model_snapshots",
+                snapshot_id,
+                "_update",
+            ),
+            params=params,
+            headers=headers,
+            body=body,
+        )
diff --git a/elasticsearch_7/_async/client/monitoring.py b/elasticsearch_7/_async/client/monitoring.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4bf3b9e1d1b5930176a10a8268836296adadfbc
--- /dev/null
+++ b/elasticsearch_7/_async/client/monitoring.py
@@ -0,0 +1,34 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH, _bulk_body
+
+
+class MonitoringClient(NamespacedClient):
+    @query_params("interval", "system_api_version", "system_id")
+    async def bulk(self, body, doc_type=None, params=None, headers=None):
+        """
+        Used by the monitoring features to send monitoring data.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/monitor-elasticsearch-cluster.html>`_
+
+        :arg body: The operation definition and data (action-data
+            pairs), separated by newlines
+        :arg doc_type: Default document type for items which don't
+            provide one
+        :arg interval: Collection interval (e.g., '10s' or '10000ms') of
+            the payload
+        :arg system_api_version: API Version of the monitored system
+        :arg system_id: Identifier of the monitored system
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        body = _bulk_body(self.transport.serializer, body)
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_monitoring", doc_type, "bulk"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
diff --git a/elasticsearch_7/_async/client/nodes.py b/elasticsearch_7/_async/client/nodes.py
new file mode 100644
index 0000000000000000000000000000000000000000..9053eac73660f9013e92a508cfbd4ea993f5be11
--- /dev/null
+++ b/elasticsearch_7/_async/client/nodes.py
@@ -0,0 +1,160 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path
+
+
+class NodesClient(NamespacedClient):
+    @query_params("timeout")
+    async def reload_secure_settings(
+        self, body=None, node_id=None, params=None, headers=None
+    ):
+        """
+        Reloads secure settings.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/secure-settings.html#reloadable-secure-settings>`_
+
+        :arg body: An object containing the password for the
+            elasticsearch keystore
+        :arg node_id: A comma-separated list of node IDs to limit the
+            reload to. Usually left empty, since reloading secure settings
+            normally involves every node in the cluster.
+        :arg timeout: Explicit operation timeout
+        """
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_nodes", node_id, "reload_secure_settings"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("flat_settings", "timeout")
+    async def info(self, node_id=None, metric=None, params=None, headers=None):
+        """
+        Returns information about nodes in the cluster.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cluster-nodes-info.html>`_
+
+        :arg node_id: A comma-separated list of node IDs or names to
+            limit the returned information; use `_local` to return information from
+            the node you're connecting to, leave empty to get information from all
+            nodes
+        :arg metric: A comma-separated list of metrics you wish
+            returned. Leave empty to return all.  Valid choices: settings, os,
+            process, jvm, thread_pool, transport, http, plugins, ingest
+        :arg flat_settings: Return settings in flat format (default:
+            false)
+        :arg timeout: Explicit operation timeout
+        """
+        return await self.transport.perform_request(
+            "GET", _make_path("_nodes", node_id, metric), params=params, headers=headers
+        )
+
+    @query_params(
+        "doc_type", "ignore_idle_threads", "interval", "snapshots", "threads", "timeout"
+    )
+    async def hot_threads(self, node_id=None, params=None, headers=None):
+        """
+        Returns information about hot threads on each node in the cluster.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cluster-nodes-hot-threads.html>`_
+
+        :arg node_id: A comma-separated list of node IDs or names to
+            limit the returned information; use `_local` to return information from
+            the node you're connecting to, leave empty to get information from all
+            nodes
+        :arg doc_type: The type to sample (default: cpu)  Valid choices:
+            cpu, wait, block
+        :arg ignore_idle_threads: Don't show threads that are in known-
+            idle places, such as waiting on a socket select or pulling from an empty
+            task queue (default: true)
+        :arg interval: The interval for the second sampling of threads
+        :arg snapshots: Number of samples of thread stacktrace (default:
+            10)
+        :arg threads: Specify the number of threads to provide
+            information for (default: 3)
+        :arg timeout: Explicit operation timeout
+        """
+        # type is a reserved word so it cannot be used, use doc_type instead
+        if "doc_type" in params:
+            params["type"] = params.pop("doc_type")
+
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_nodes", node_id, "hot_threads"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("timeout")
+    async def usage(self, node_id=None, metric=None, params=None, headers=None):
+        """
+        Returns low-level information about REST actions usage on nodes.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cluster-nodes-usage.html>`_
+
+        :arg node_id: A comma-separated list of node IDs or names to
+            limit the returned information; use `_local` to return information from
+            the node you're connecting to, leave empty to get information from all
+            nodes
+        :arg metric: Limit the information returned to the specified
+            metrics  Valid choices: _all, rest_actions
+        :arg timeout: Explicit operation timeout
+        """
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_nodes", node_id, "usage", metric),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "completion_fields",
+        "fielddata_fields",
+        "fields",
+        "groups",
+        "include_segment_file_sizes",
+        "level",
+        "timeout",
+        "types",
+    )
+    async def stats(
+        self, node_id=None, metric=None, index_metric=None, params=None, headers=None
+    ):
+        """
+        Returns statistical information about nodes in the cluster.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cluster-nodes-stats.html>`_
+
+        :arg node_id: A comma-separated list of node IDs or names to
+            limit the returned information; use `_local` to return information from
+            the node you're connecting to, leave empty to get information from all
+            nodes
+        :arg metric: Limit the information returned to the specified
+            metrics  Valid choices: _all, breaker, fs, http, indices, jvm, os,
+            process, thread_pool, transport, discovery
+        :arg index_metric: Limit the information returned for `indices`
+            metric to the specific index metrics. Isn't used if `indices` (or `all`)
+            metric isn't specified.  Valid choices: _all, completion, docs,
+            fielddata, query_cache, flush, get, indexing, merge, request_cache,
+            refresh, search, segments, store, warmer, suggest
+        :arg completion_fields: A comma-separated list of fields for
+            `fielddata` and `suggest` index metric (supports wildcards)
+        :arg fielddata_fields: A comma-separated list of fields for
+            `fielddata` index metric (supports wildcards)
+        :arg fields: A comma-separated list of fields for `fielddata`
+            and `completion` index metric (supports wildcards)
+        :arg groups: A comma-separated list of search groups for
+            `search` index metric
+        :arg include_segment_file_sizes: Whether to report the
+            aggregated disk usage of each one of the Lucene index files (only
+            applies if segment stats are requested)
+        :arg level: Return indices stats aggregated at index, node or
+            shard level  Valid choices: indices, node, shards  Default: node
+        :arg timeout: Explicit operation timeout
+        :arg types: A comma-separated list of document types for the
+            `indexing` index metric
+        """
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_nodes", node_id, "stats", metric, index_metric),
+            params=params,
+            headers=headers,
+        )
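
An illustrative sketch of the NodesClient calls defined above (the cluster address is an assumption; note that `hot_threads` returns pre-formatted text rather than JSON):

    import asyncio

    from elasticsearch_7 import AsyncElasticsearch

    async def main():
        es = AsyncElasticsearch(hosts=["http://localhost:9200"])
        info = await es.nodes.info(metric="jvm,os")               # static node information
        stats = await es.nodes.stats(metric="jvm", level="node")  # runtime statistics
        print(len(info["nodes"]), "nodes reported,", len(stats["nodes"]), "with stats")
        # Hot threads come back as plain text
        print(await es.nodes.hot_threads(doc_type="cpu", threads=3))

    asyncio.run(main())
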
diff --git a/elasticsearch_7/_async/client/remote.py b/elasticsearch_7/_async/client/remote.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c2767b1dfc6ecc89100b32bbbd435bac6f8ab4a
--- /dev/null
+++ b/elasticsearch_7/_async/client/remote.py
@@ -0,0 +1,16 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params
+
+
+class RemoteClient(NamespacedClient):
+    @query_params()
+    async def info(self, params=None, headers=None):
+        """
+        Returns the information about configured remote clusters.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cluster-remote-info.html>`_
+        """
+        return await self.transport.perform_request(
+            "GET", "/_remote/info", params=params, headers=headers
+        )
diff --git a/elasticsearch_7/_async/client/rollup.py b/elasticsearch_7/_async/client/rollup.py
new file mode 100644
index 0000000000000000000000000000000000000000..01a747374c6cf10374b4fa52902f9920489d148d
--- /dev/null
+++ b/elasticsearch_7/_async/client/rollup.py
@@ -0,0 +1,157 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class RollupClient(NamespacedClient):
+    @query_params()
+    async def delete_job(self, id, params=None, headers=None):
+        """
+        Deletes an existing rollup job.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/rollup-delete-job.html>`_
+
+        :arg id: The ID of the job to delete
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'id'.")
+
+        return await self.transport.perform_request(
+            "DELETE", _make_path("_rollup", "job", id), params=params, headers=headers
+        )
+
+    @query_params()
+    async def get_jobs(self, id=None, params=None, headers=None):
+        """
+        Retrieves the configuration, stats, and status of rollup jobs.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/rollup-get-job.html>`_
+
+        :arg id: The ID of the job(s) to fetch. Accepts glob patterns,
+            or left blank for all jobs
+        """
+        return await self.transport.perform_request(
+            "GET", _make_path("_rollup", "job", id), params=params, headers=headers
+        )
+
+    @query_params()
+    async def get_rollup_caps(self, id=None, params=None, headers=None):
+        """
+        Returns the capabilities of any rollup jobs that have been configured for a
+        specific index or index pattern.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/rollup-get-rollup-caps.html>`_
+
+        :arg id: The ID of the index to check rollup capabilities on, or
+            left blank for all jobs
+        """
+        return await self.transport.perform_request(
+            "GET", _make_path("_rollup", "data", id), params=params, headers=headers
+        )
+
+    @query_params()
+    async def get_rollup_index_caps(self, index, params=None, headers=None):
+        """
+        Returns the rollup capabilities of all jobs inside a rollup index (i.e. the
+        index where rollup data is stored).
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/rollup-get-rollup-index-caps.html>`_
+
+        :arg index: The rollup index or index pattern to obtain rollup
+            capabilities from.
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return await self.transport.perform_request(
+            "GET", _make_path(index, "_rollup", "data"), params=params, headers=headers
+        )
+
+    @query_params()
+    async def put_job(self, id, body, params=None, headers=None):
+        """
+        Creates a rollup job.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/rollup-put-job.html>`_
+
+        :arg id: The ID of the job to create
+        :arg body: The job configuration
+        """
+        for param in (id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_rollup", "job", id),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("rest_total_hits_as_int", "typed_keys")
+    async def rollup_search(
+        self, index, body, doc_type=None, params=None, headers=None
+    ):
+        """
+        Enables searching rolled-up data using the standard query DSL.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/rollup-search.html>`_
+
+        :arg index: The indices or index-pattern(s) (containing rollup
+            or regular data) that should be searched
+        :arg body: The search request body
+        :arg doc_type: The doc type inside the index
+        :arg rest_total_hits_as_int: Indicates whether hits.total should
+            be rendered as an integer or an object in the rest search response
+        :arg typed_keys: Specify whether aggregation and suggester names
+            should be prefixed by their respective types in the response
+        """
+        for param in (index, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path(index, doc_type, "_rollup_search"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    async def start_job(self, id, params=None, headers=None):
+        """
+        Starts an existing, stopped rollup job.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/rollup-start-job.html>`_
+
+        :arg id: The ID of the job to start
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'id'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_rollup", "job", id, "_start"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("timeout", "wait_for_completion")
+    async def stop_job(self, id, params=None, headers=None):
+        """
+        Stops an existing, started rollup job.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/rollup-stop-job.html>`_
+
+        :arg id: The ID of the job to stop
+        :arg timeout: Block for (at maximum) the specified duration
+            while waiting for the job to stop.  Defaults to 30s.
+        :arg wait_for_completion: True if the API should block until the
+            job has fully stopped, false if it should execute asynchronously.
+            Defaults to false.
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'id'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_rollup", "job", id, "_stop"),
+            params=params,
+            headers=headers,
+        )
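
An illustrative end-to-end sketch of the RollupClient: create a job, start it, and query the rolled-up data (the index pattern `sensor-*`, the rollup index, the field names, and the cron schedule are assumptions):

    import asyncio

    from elasticsearch_7 import AsyncElasticsearch

    async def main():
        es = AsyncElasticsearch(hosts=["http://localhost:9200"])
        await es.rollup.put_job(
            id="sensor-rollup",
            body={
                "index_pattern": "sensor-*",
                "rollup_index": "sensor_rollup",
                "cron": "*/30 * * * * ?",
                "page_size": 1000,
                "groups": {
                    "date_histogram": {"field": "timestamp", "fixed_interval": "1h"}
                },
                "metrics": [{"field": "temperature", "metrics": ["max"]}],
            },
        )
        await es.rollup.start_job(id="sensor-rollup")
        # Rolled-up data is queried with the regular query DSL
        resp = await es.rollup.rollup_search(
            index="sensor_rollup",
            body={"size": 0, "aggs": {"max_temp": {"max": {"field": "temperature"}}}},
        )
        print(resp["aggregations"]["max_temp"])

    asyncio.run(main())
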
diff --git a/elasticsearch_7/_async/client/searchable_snapshots.py b/elasticsearch_7/_async/client/searchable_snapshots.py
new file mode 100644
index 0000000000000000000000000000000000000000..eecd873db134082a8a4bdbddc04d594066d916a4
--- /dev/null
+++ b/elasticsearch_7/_async/client/searchable_snapshots.py
@@ -0,0 +1,92 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class SearchableSnapshotsClient(NamespacedClient):
+    @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable")
+    async def clear_cache(self, index=None, params=None, headers=None):
+        """
+        Clear the cache of searchable snapshots.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/searchable-snapshots-api-clear-cache.html>`_
+
+        :arg index: A comma-separated list of index names to limit the
+            operation
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, none, all  Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        """
+        return await self.transport.perform_request(
+            "POST",
+            _make_path(index, "_searchable_snapshots", "cache", "clear"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("master_timeout", "wait_for_completion")
+    async def mount(self, repository, snapshot, body, params=None, headers=None):
+        """
+        Mount a snapshot as a searchable index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/searchable-snapshots-api-mount-snapshot.html>`_
+
+        :arg repository: The name of the repository containing the
+            snapshot of the index to mount
+        :arg snapshot: The name of the snapshot of the index to mount
+        :arg body: The restore configuration for mounting the snapshot
+            as searchable
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg wait_for_completion: Should this request wait until the
+            operation has completed before returning
+        """
+        for param in (repository, snapshot, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_snapshot", repository, snapshot, "_mount"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    async def repository_stats(self, repository, params=None, headers=None):
+        """
+        Retrieve usage statistics about a snapshot repository.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/searchable-snapshots-repository-stats.html>`_
+
+        :arg repository: The repository to get stats for
+        """
+        if repository in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'repository'.")
+
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_snapshot", repository, "_stats"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    async def stats(self, index=None, params=None, headers=None):
+        """
+        Retrieve various statistics about searchable snapshots.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/searchable-snapshots-api-stats.html>`_
+
+        :arg index: A comma-separated list of index names
+        """
+        return await self.transport.perform_request(
+            "GET",
+            _make_path(index, "_searchable_snapshots", "stats"),
+            params=params,
+            headers=headers,
+        )
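
An illustrative sketch of mounting a snapshotted index as a searchable snapshot and checking its stats (repository, snapshot, and index names are assumptions; the feature also requires an appropriate license):

    import asyncio

    from elasticsearch_7 import AsyncElasticsearch

    async def main():
        es = AsyncElasticsearch(hosts=["http://localhost:9200"])
        await es.searchable_snapshots.mount(
            repository="my_repository",
            snapshot="nightly-snap",
            body={"index": "logs-2020.06.01", "renamed_index": "logs-2020.06.01-mounted"},
            wait_for_completion=True,
        )
        print(await es.searchable_snapshots.stats(index="logs-2020.06.01-mounted"))

    asyncio.run(main())
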
diff --git a/elasticsearch_7/_async/client/security.py b/elasticsearch_7/_async/client/security.py
new file mode 100644
index 0000000000000000000000000000000000000000..df6eef3ab996902115a023db772d347e7a3dff06
--- /dev/null
+++ b/elasticsearch_7/_async/client/security.py
@@ -0,0 +1,499 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class SecurityClient(NamespacedClient):
+    @query_params()
+    async def authenticate(self, params=None, headers=None):
+        """
+        Enables authentication as a user and retrieves information about the
+        authenticated user.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-authenticate.html>`_
+        """
+        return await self.transport.perform_request(
+            "GET", "/_security/_authenticate", params=params, headers=headers
+        )
+
+    @query_params("refresh")
+    async def change_password(self, body, username=None, params=None, headers=None):
+        """
+        Changes the passwords of users in the native realm and built-in users.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-change-password.html>`_
+
+        :arg body: the new password for the user
+        :arg username: The username of the user to change the password
+            for
+        :arg refresh: If `true` (the default) then refresh the affected
+            shards to make this operation visible to search, if `wait_for` then wait
+            for a refresh to make this operation visible to search, if `false` then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_security", "user", username, "_password"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("usernames")
+    async def clear_cached_realms(self, realms, params=None, headers=None):
+        """
+        Evicts users from the user cache. Can completely clear the cache or evict
+        specific users.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-clear-cache.html>`_
+
+        :arg realms: Comma-separated list of realms to clear
+        :arg usernames: Comma-separated list of usernames to clear from
+            the cache
+        """
+        if realms in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'realms'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_security", "realm", realms, "_clear_cache"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    async def clear_cached_roles(self, name, params=None, headers=None):
+        """
+        Evicts roles from the native role cache.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-clear-role-cache.html>`_
+
+        :arg name: Role name
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_security", "role", name, "_clear_cache"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("refresh")
+    async def create_api_key(self, body, params=None, headers=None):
+        """
+        Creates an API key for access without requiring basic authentication.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-create-api-key.html>`_
+
+        :arg body: The api key request to create an API key
+        :arg refresh: If `true` (the default) then refresh the affected
+            shards to make this operation visible to search, if `wait_for` then wait
+            for a refresh to make this operation visible to search, if `false` then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return await self.transport.perform_request(
+            "PUT", "/_security/api_key", params=params, headers=headers, body=body
+        )
+
+    @query_params("refresh")
+    async def delete_privileges(self, application, name, params=None, headers=None):
+        """
+        Removes application privileges.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-delete-privilege.html>`_
+
+        :arg application: Application name
+        :arg name: Privilege name
+        :arg refresh: If `true` (the default) then refresh the affected
+            shards to make this operation visible to search, if `wait_for` then wait
+            for a refresh to make this operation visible to search, if `false` then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        """
+        for param in (application, name):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "DELETE",
+            _make_path("_security", "privilege", application, name),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("refresh")
+    async def delete_role(self, name, params=None, headers=None):
+        """
+        Removes roles in the native realm.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-delete-role.html>`_
+
+        :arg name: Role name
+        :arg refresh: If `true` (the default) then refresh the affected
+            shards to make this operation visible to search, if `wait_for` then wait
+            for a refresh to make this operation visible to search, if `false` then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return await self.transport.perform_request(
+            "DELETE",
+            _make_path("_security", "role", name),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("refresh")
+    async def delete_role_mapping(self, name, params=None, headers=None):
+        """
+        Removes role mappings.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-delete-role-mapping.html>`_
+
+        :arg name: Role-mapping name
+        :arg refresh: If `true` (the default) then refresh the affected
+            shards to make this operation visible to search, if `wait_for` then wait
+            for a refresh to make this operation visible to search, if `false` then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return await self.transport.perform_request(
+            "DELETE",
+            _make_path("_security", "role_mapping", name),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("refresh")
+    async def delete_user(self, username, params=None, headers=None):
+        """
+        Deletes users from the native realm.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-delete-user.html>`_
+
+        :arg username: username
+        :arg refresh: If `true` (the default) then refresh the affected
+            shards to make this operation visible to search, if `wait_for` then wait
+            for a refresh to make this operation visible to search, if `false` then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        """
+        if username in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'username'.")
+
+        return await self.transport.perform_request(
+            "DELETE",
+            _make_path("_security", "user", username),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("refresh")
+    async def disable_user(self, username, params=None, headers=None):
+        """
+        Disables users in the native realm.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-disable-user.html>`_
+
+        :arg username: The username of the user to disable
+        :arg refresh: If `true` (the default) then refresh the affected
+            shards to make this operation visible to search, if `wait_for` then wait
+            for a refresh to make this operation visible to search, if `false` then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        """
+        if username in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'username'.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_security", "user", username, "_disable"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("refresh")
+    async def enable_user(self, username, params=None, headers=None):
+        """
+        Enables users in the native realm.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-enable-user.html>`_
+
+        :arg username: The username of the user to enable
+        :arg refresh: If `true` (the default) then refresh the affected
+            shards to make this operation visible to search, if `wait_for` then wait
+            for a refresh to make this operation visible to search, if `false` then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        """
+        if username in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'username'.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_security", "user", username, "_enable"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("id", "name", "owner", "realm_name", "username")
+    async def get_api_key(self, params=None, headers=None):
+        """
+        Retrieves information for one or more API keys.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-get-api-key.html>`_
+
+        :arg id: API key id of the API key to be retrieved
+        :arg name: API key name of the API key to be retrieved
+        :arg owner: flag to query API keys owned by the currently
+            authenticated user
+        :arg realm_name: realm name of the user who created this API key
+            to be retrieved
+        :arg username: user name of the user who created this API key to
+            be retrieved
+        """
+        return await self.transport.perform_request(
+            "GET", "/_security/api_key", params=params, headers=headers
+        )
+
+    @query_params()
+    async def get_privileges(
+        self, application=None, name=None, params=None, headers=None
+    ):
+        """
+        Retrieves application privileges.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-get-privileges.html>`_
+
+        :arg application: Application name
+        :arg name: Privilege name
+        """
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_security", "privilege", application, name),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    async def get_role(self, name=None, params=None, headers=None):
+        """
+        Retrieves roles in the native realm.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-get-role.html>`_
+
+        :arg name: Role name
+        """
+        return await self.transport.perform_request(
+            "GET", _make_path("_security", "role", name), params=params, headers=headers
+        )
+
+    @query_params()
+    async def get_role_mapping(self, name=None, params=None, headers=None):
+        """
+        Retrieves role mappings.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-get-role-mapping.html>`_
+
+        :arg name: Role-Mapping name
+        """
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_security", "role_mapping", name),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    async def get_token(self, body, params=None, headers=None):
+        """
+        Creates a bearer token for access without requiring basic authentication.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-get-token.html>`_
+
+        :arg body: The token request to get
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return await self.transport.perform_request(
+            "POST", "/_security/oauth2/token", params=params, headers=headers, body=body
+        )
+
+    @query_params()
+    async def get_user(self, username=None, params=None, headers=None):
+        """
+        Retrieves information about users in the native realm and built-in users.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-get-user.html>`_
+
+        :arg username: A comma-separated list of usernames
+        """
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_security", "user", username),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    async def get_user_privileges(self, params=None, headers=None):
+        """
+        Retrieves the security privileges of the currently authenticated user.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-get-user-privileges.html>`_
+        """
+        return await self.transport.perform_request(
+            "GET", "/_security/user/_privileges", params=params, headers=headers
+        )
+
+    @query_params()
+    async def has_privileges(self, body, user=None, params=None, headers=None):
+        """
+        Determines whether the specified user has a specified list of privileges.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-has-privileges.html>`_
+
+        :arg body: The privileges to test
+        :arg user: Username
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_security", "user", user, "_has_privileges"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    async def invalidate_api_key(self, body, params=None, headers=None):
+        """
+        Invalidates one or more API keys.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-invalidate-api-key.html>`_
+
+        :arg body: The api key request to invalidate API key(s)
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return await self.transport.perform_request(
+            "DELETE", "/_security/api_key", params=params, headers=headers, body=body
+        )
+
+    @query_params()
+    async def invalidate_token(self, body, params=None, headers=None):
+        """
+        Invalidates one or more access tokens or refresh tokens.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-invalidate-token.html>`_
+
+        :arg body: The token to invalidate
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return await self.transport.perform_request(
+            "DELETE",
+            "/_security/oauth2/token",
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("refresh")
+    async def put_privileges(self, body, params=None, headers=None):
+        """
+        Adds or updates application privileges.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-put-privileges.html>`_
+
+        :arg body: The privilege(s) to add
+        :arg refresh: If `true` (the default) then refresh the affected
+            shards to make this operation visible to search, if `wait_for` then wait
+            for a refresh to make this operation visible to search, if `false` then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return await self.transport.perform_request(
+            "PUT", "/_security/privilege/", params=params, headers=headers, body=body
+        )
+
+    @query_params("refresh")
+    async def put_role(self, name, body, params=None, headers=None):
+        """
+        Adds and updates roles in the native realm.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-put-role.html>`_
+
+        :arg name: Role name
+        :arg body: The role to add
+        :arg refresh: If `true` (the default) then refresh the affected
+            shards to make this operation visible to search, if `wait_for` then wait
+            for a refresh to make this operation visible to search, if `false` then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        """
+        for param in (name, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_security", "role", name),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("refresh")
+    async def put_role_mapping(self, name, body, params=None, headers=None):
+        """
+        Creates and updates role mappings.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-put-role-mapping.html>`_
+
+        :arg name: Role-mapping name
+        :arg body: The role mapping to add
+        :arg refresh: If `true` (the default) then refresh the affected
+            shards to make this operation visible to search, if `wait_for` then wait
+            for a refresh to make this operation visible to search, if `false` then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        """
+        for param in (name, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_security", "role_mapping", name),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("refresh")
+    async def put_user(self, username, body, params=None, headers=None):
+        """
+        Adds and updates users in the native realm. These users are commonly referred
+        to as native users.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-put-user.html>`_
+
+        :arg username: The username of the User
+        :arg body: The user to add
+        :arg refresh: If `true` (the default) then refresh the affected
+            shards to make this operation visible to search, if `wait_for` then wait
+            for a refresh to make this operation visible to search, if `false` then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        """
+        for param in (username, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_security", "user", username),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    async def get_builtin_privileges(self, params=None, headers=None):
+        """
+        Retrieves the list of cluster privileges and index privileges that are
+        available in this version of Elasticsearch.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-get-builtin-privileges.html>`_
+        """
+        return await self.transport.perform_request(
+            "GET", "/_security/privilege/_builtin", params=params, headers=headers
+        )
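
An illustrative sketch of a few common SecurityClient calls: create a role and a native user, issue an API key, and check the authenticated identity (the credentials, role, and user names are assumptions; security must be enabled and the caller needs the relevant privileges):

    import asyncio

    from elasticsearch_7 import AsyncElasticsearch

    async def main():
        es = AsyncElasticsearch(
            hosts=["http://localhost:9200"], http_auth=("elastic", "changeme")
        )
        await es.security.put_role(
            "logs_reader",
            body={"indices": [{"names": ["logs-*"], "privileges": ["read"]}]},
        )
        await es.security.put_user(
            "jdoe",
            body={"password": "a-long-secret-password", "roles": ["logs_reader"]},
        )
        key = await es.security.create_api_key(body={"name": "ingest-key"})
        me = await es.security.authenticate()
        print("created API key", key["id"], "as", me["username"])

    asyncio.run(main())
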
diff --git a/elasticsearch_7/_async/client/slm.py b/elasticsearch_7/_async/client/slm.py
new file mode 100644
index 0000000000000000000000000000000000000000..00d6f2c1ab16e2178b63510ce5f287b23300e502
--- /dev/null
+++ b/elasticsearch_7/_async/client/slm.py
@@ -0,0 +1,135 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class SlmClient(NamespacedClient):
+    @query_params()
+    async def delete_lifecycle(self, policy_id, params=None, headers=None):
+        """
+        Deletes an existing snapshot lifecycle policy.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/slm-api-delete-policy.html>`_
+
+        :arg policy_id: The id of the snapshot lifecycle policy to
+            remove
+        """
+        if policy_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'policy_id'.")
+
+        return await self.transport.perform_request(
+            "DELETE",
+            _make_path("_slm", "policy", policy_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    async def execute_lifecycle(self, policy_id, params=None, headers=None):
+        """
+        Immediately creates a snapshot according to the lifecycle policy, without
+        waiting for the scheduled time.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/slm-api-execute-lifecycle.html>`_
+
+        :arg policy_id: The id of the snapshot lifecycle policy to be
+            executed
+        """
+        if policy_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'policy_id'.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_slm", "policy", policy_id, "_execute"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    async def execute_retention(self, params=None, headers=None):
+        """
+        Deletes any snapshots that are expired according to the policy's retention
+        rules.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/slm-api-execute-retention.html>`_
+        """
+        return await self.transport.perform_request(
+            "POST", "/_slm/_execute_retention", params=params, headers=headers
+        )
+
+    @query_params()
+    async def get_lifecycle(self, policy_id=None, params=None, headers=None):
+        """
+        Retrieves one or more snapshot lifecycle policy definitions and information
+        about the latest snapshot attempts.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/slm-api-get-policy.html>`_
+
+        :arg policy_id: Comma-separated list of snapshot lifecycle
+            policies to retrieve
+        """
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_slm", "policy", policy_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    async def get_stats(self, params=None, headers=None):
+        """
+        Returns global and policy-level statistics about actions taken by snapshot
+        lifecycle management.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/slm-api-get-stats.html>`_
+        """
+        return await self.transport.perform_request(
+            "GET", "/_slm/stats", params=params, headers=headers
+        )
+
+    @query_params()
+    async def put_lifecycle(self, policy_id, body=None, params=None, headers=None):
+        """
+        Creates or updates a snapshot lifecycle policy.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/slm-api-put-policy.html>`_
+
+        :arg policy_id: The id of the snapshot lifecycle policy
+        :arg body: The snapshot lifecycle policy definition to register
+        """
+        if policy_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'policy_id'.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_slm", "policy", policy_id),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    async def get_status(self, params=None, headers=None):
+        """
+        Retrieves the status of snapshot lifecycle management (SLM).
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/slm-api-get-status.html>`_
+        """
+        return await self.transport.perform_request(
+            "GET", "/_slm/status", params=params, headers=headers
+        )
+
+    @query_params()
+    async def start(self, params=None, headers=None):
+        """
+        Turns on snapshot lifecycle management (SLM).
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/slm-api-start.html>`_
+        """
+        return await self.transport.perform_request(
+            "POST", "/_slm/start", params=params, headers=headers
+        )
+
+    @query_params()
+    async def stop(self, params=None, headers=None):
+        """
+        Turns off snapshot lifecycle management (SLM).
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/slm-api-stop.html>`_
+        """
+        return await self.transport.perform_request(
+            "POST", "/_slm/stop", params=params, headers=headers
+        )
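
An illustrative sketch of registering a snapshot lifecycle policy and triggering it immediately (the policy id, schedule, and repository name are assumptions; the repository must already be registered):

    import asyncio

    from elasticsearch_7 import AsyncElasticsearch

    async def main():
        es = AsyncElasticsearch(hosts=["http://localhost:9200"])
        await es.slm.put_lifecycle(
            "nightly-snapshots",
            body={
                "schedule": "0 30 1 * * ?",
                "name": "<nightly-snap-{now/d}>",
                "repository": "my_repository",
                "retention": {"expire_after": "30d"},
            },
        )
        # Run the policy now instead of waiting for the schedule
        run = await es.slm.execute_lifecycle("nightly-snapshots")
        print(run["snapshot_name"], await es.slm.get_stats())

    asyncio.run(main())
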
diff --git a/elasticsearch_7/_async/client/snapshot.py b/elasticsearch_7/_async/client/snapshot.py
new file mode 100644
index 0000000000000000000000000000000000000000..0d446760a4b1f1d96aefe500600acc633f1b10b8
--- /dev/null
+++ b/elasticsearch_7/_async/client/snapshot.py
@@ -0,0 +1,234 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class SnapshotClient(NamespacedClient):
+    @query_params("master_timeout", "wait_for_completion")
+    async def create(self, repository, snapshot, body=None, params=None, headers=None):
+        """
+        Creates a snapshot in a repository.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/modules-snapshots.html>`_
+
+        :arg repository: A repository name
+        :arg snapshot: A snapshot name
+        :arg body: The snapshot definition
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg wait_for_completion: Should this request wait until the
+            operation has completed before returning
+        """
+        for param in (repository, snapshot):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_snapshot", repository, snapshot),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("master_timeout")
+    async def delete(self, repository, snapshot, params=None, headers=None):
+        """
+        Deletes a snapshot.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/modules-snapshots.html>`_
+
+        :arg repository: A repository name
+        :arg snapshot: A snapshot name
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        """
+        for param in (repository, snapshot):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "DELETE",
+            _make_path("_snapshot", repository, snapshot),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("ignore_unavailable", "master_timeout", "verbose")
+    async def get(self, repository, snapshot, params=None, headers=None):
+        """
+        Returns information about a snapshot.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/modules-snapshots.html>`_
+
+        :arg repository: A repository name
+        :arg snapshot: A comma-separated list of snapshot names
+        :arg ignore_unavailable: Whether to ignore unavailable
+            snapshots, defaults to false which means a SnapshotMissingException is
+            thrown
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg verbose: Whether to show verbose snapshot info or only show
+            the basic info found in the repository index blob
+        """
+        for param in (repository, snapshot):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_snapshot", repository, snapshot),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("master_timeout", "timeout")
+    async def delete_repository(self, repository, params=None, headers=None):
+        """
+        Deletes a repository.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/modules-snapshots.html>`_
+
+        :arg repository: Name of the snapshot repository to unregister.
+            Wildcard (`*`) patterns are supported.
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg timeout: Explicit operation timeout
+        """
+        if repository in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'repository'.")
+
+        return await self.transport.perform_request(
+            "DELETE",
+            _make_path("_snapshot", repository),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("local", "master_timeout")
+    async def get_repository(self, repository=None, params=None, headers=None):
+        """
+        Returns information about a repository.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/modules-snapshots.html>`_
+
+        :arg repository: A comma-separated list of repository names
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        """
+        return await self.transport.perform_request(
+            "GET", _make_path("_snapshot", repository), params=params, headers=headers
+        )
+
+    @query_params("master_timeout", "timeout", "verify")
+    async def create_repository(self, repository, body, params=None, headers=None):
+        """
+        Creates a repository.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/modules-snapshots.html>`_
+
+        :arg repository: A repository name
+        :arg body: The repository definition
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg timeout: Explicit operation timeout
+        :arg verify: Whether to verify the repository after creation
+        """
+        for param in (repository, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_snapshot", repository),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("master_timeout", "wait_for_completion")
+    async def restore(self, repository, snapshot, body=None, params=None, headers=None):
+        """
+        Restores a snapshot.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/modules-snapshots.html>`_
+
+        :arg repository: A repository name
+        :arg snapshot: A snapshot name
+        :arg body: Details of what to restore
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg wait_for_completion: Should this request wait until the
+            operation has completed before returning
+        """
+        for param in (repository, snapshot):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_snapshot", repository, snapshot, "_restore"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("ignore_unavailable", "master_timeout")
+    async def status(self, repository=None, snapshot=None, params=None, headers=None):
+        """
+        Returns information about the status of a snapshot.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/modules-snapshots.html>`_
+
+        :arg repository: A repository name
+        :arg snapshot: A comma-separated list of snapshot names
+        :arg ignore_unavailable: Whether to ignore unavailable
+            snapshots, defaults to false which means a SnapshotMissingException is
+            thrown
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        """
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_snapshot", repository, snapshot, "_status"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("master_timeout", "timeout")
+    async def verify_repository(self, repository, params=None, headers=None):
+        """
+        Verifies a repository.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/modules-snapshots.html>`_
+
+        :arg repository: A repository name
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg timeout: Explicit operation timeout
+        """
+        if repository in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'repository'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_snapshot", repository, "_verify"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("master_timeout", "timeout")
+    async def cleanup_repository(self, repository, params=None, headers=None):
+        """
+        Removes stale data from a repository.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/clean-up-snapshot-repo-api.html>`_
+
+        :arg repository: A repository name
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg timeout: Explicit operation timeout
+        """
+        if repository in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'repository'.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_snapshot", repository, "_cleanup"),
+            params=params,
+            headers=headers,
+        )
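
An illustrative sketch of the basic snapshot workflow with the SnapshotClient: register a filesystem repository, take a snapshot, and restore it under a new index name (the repository path and all names are assumptions; the location must be allowed by `path.repo`):

    import asyncio

    from elasticsearch_7 import AsyncElasticsearch

    async def main():
        es = AsyncElasticsearch(hosts=["http://localhost:9200"])
        await es.snapshot.create_repository(
            repository="my_backup",
            body={"type": "fs", "settings": {"location": "/mnt/backups/my_backup"}},
        )
        await es.snapshot.create(
            repository="my_backup",
            snapshot="snapshot_1",
            body={"indices": "logs-*"},
            wait_for_completion=True,
        )
        # Restore under a new name so live indices are not overwritten
        await es.snapshot.restore(
            repository="my_backup",
            snapshot="snapshot_1",
            body={
                "indices": "logs-*",
                "rename_pattern": "logs-(.+)",
                "rename_replacement": "restored-logs-$1",
            },
        )

    asyncio.run(main())
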
diff --git a/elasticsearch_7/_async/client/sql.py b/elasticsearch_7/_async/client/sql.py
new file mode 100644
index 0000000000000000000000000000000000000000..bc3c3d3d38c80567b0c78b0be123779c06ecfe2c
--- /dev/null
+++ b/elasticsearch_7/_async/client/sql.py
@@ -0,0 +1,56 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, SKIP_IN_PATH
+
+
+class SqlClient(NamespacedClient):
+    @query_params()
+    async def clear_cursor(self, body, params=None, headers=None):
+        """
+        Clears the SQL cursor
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/sql-pagination.html>`_
+
+        :arg body: Specify the cursor value in the `cursor` element to
+            clear the cursor.
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return await self.transport.perform_request(
+            "POST", "/_sql/close", params=params, headers=headers, body=body
+        )
+
+    @query_params("format")
+    async def query(self, body, params=None, headers=None):
+        """
+        Executes a SQL request
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/sql-rest-overview.html>`_
+
+        :arg body: Use the `query` element to start a query. Use the
+            `cursor` element to continue a query.
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return await self.transport.perform_request(
+            "POST", "/_sql", params=params, headers=headers, body=body
+        )
+
+    @query_params()
+    async def translate(self, body, params=None, headers=None):
+        """
+        Translates SQL into Elasticsearch queries
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/sql-translate.html>`_
+
+        :arg body: Specify the query in the `query` element.
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return await self.transport.perform_request(
+            "POST", "/_sql/translate", params=params, headers=headers, body=body
+        )
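+
+
+# A hedged usage sketch for the SQL APIs above (not part of the generated
+# client). The index name and SQL statement are illustrative assumptions.
+async def _example_sql_query():
+    from elasticsearch_7 import AsyncElasticsearch
+
+    es = AsyncElasticsearch()
+    # Start a query; a small fetch_size forces cursor-based pagination.
+    resp = await es.sql.query(body={"query": "SELECT * FROM library", "fetch_size": 5})
+    while resp.get("cursor"):
+        # Continue the same query from the returned cursor.
+        resp = await es.sql.query(body={"cursor": resp["cursor"]})
+    # Show the Query DSL that the SQL statement translates to.
+    translated = await es.sql.translate(body={"query": "SELECT * FROM library"})
+    await es.close()
+    return translated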
diff --git a/elasticsearch_7/_async/client/ssl.py b/elasticsearch_7/_async/client/ssl.py
new file mode 100644
index 0000000000000000000000000000000000000000..0adafbd2ff2b79bee7cc59004aea397e2308384f
--- /dev/null
+++ b/elasticsearch_7/_async/client/ssl.py
@@ -0,0 +1,18 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params
+
+
+class SslClient(NamespacedClient):
+    @query_params()
+    async def certificates(self, params=None, headers=None):
+        """
+        Retrieves information about the X.509 certificates used to encrypt
+        communications in the cluster.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-ssl.html>`_
+        """
+        return await self.transport.perform_request(
+            "GET", "/_ssl/certificates", params=params, headers=headers
+        )
diff --git a/elasticsearch_7/_async/client/tasks.py b/elasticsearch_7/_async/client/tasks.py
new file mode 100644
index 0000000000000000000000000000000000000000..4707c49b97c2f49da167217e160e07ca801ce8ab
--- /dev/null
+++ b/elasticsearch_7/_async/client/tasks.py
@@ -0,0 +1,89 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+import warnings
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class TasksClient(NamespacedClient):
+    @query_params(
+        "actions",
+        "detailed",
+        "group_by",
+        "nodes",
+        "parent_task_id",
+        "timeout",
+        "wait_for_completion",
+    )
+    async def list(self, params=None, headers=None):
+        """
+        Returns a list of tasks.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/tasks.html>`_
+
+        :arg actions: A comma-separated list of actions that should be
+            returned. Leave empty to return all.
+        :arg detailed: Return detailed task information (default: false)
+        :arg group_by: Group tasks by nodes or parent/child
+            relationships  Valid choices: nodes, parents, none  Default: nodes
+        :arg nodes: A comma-separated list of node IDs or names to limit
+            the returned information; use `_local` to return information from the
+            node you're connecting to, leave empty to get information from all nodes
+        :arg parent_task_id: Return tasks with specified parent task id
+            (node_id:task_number). Set to -1 to return all.
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_completion: Wait for the matching tasks to
+            complete (default: false)
+        """
+        return await self.transport.perform_request(
+            "GET", "/_tasks", params=params, headers=headers
+        )
+
+    @query_params("actions", "nodes", "parent_task_id", "wait_for_completion")
+    async def cancel(self, task_id=None, params=None, headers=None):
+        """
+        Cancels a task, if it can be cancelled through an API.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/tasks.html>`_
+
+        :arg task_id: Cancel the task with specified task id
+            (node_id:task_number)
+        :arg actions: A comma-separated list of actions that should be
+            cancelled. Leave empty to cancel all.
+        :arg nodes: A comma-separated list of node IDs or names to limit
+            the returned information; use `_local` to return information from the
+            node you're connecting to, leave empty to get information from all nodes
+        :arg parent_task_id: Cancel tasks with specified parent task id
+            (node_id:task_number). Set to -1 to cancel all.
+        :arg wait_for_completion: Should the request block until the
+            cancellation of the task and its descendant tasks is completed. Defaults
+            to false
+        """
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_tasks", task_id, "_cancel"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("timeout", "wait_for_completion")
+    async def get(self, task_id=None, params=None, headers=None):
+        """
+        Returns information about a task.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/tasks.html>`_
+
+        :arg task_id: Return the task with specified id
+            (node_id:task_number)
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_completion: Wait for the matching tasks to
+            complete (default: false)
+        """
+        if task_id in SKIP_IN_PATH:
+            warnings.warn(
+                "Calling client.tasks.get() without a task_id is deprecated "
+                "and will be removed in v8.0. Use client.tasks.list() instead.",
+                category=DeprecationWarning,
+            )
+
+        return await self.transport.perform_request(
+            "GET", _make_path("_tasks", task_id), params=params, headers=headers
+        )
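+
+
+# A hedged usage sketch for the task APIs above (not part of the generated
+# client). The "*reindex" action filter is an illustrative assumption.
+async def _example_task_management():
+    from elasticsearch_7 import AsyncElasticsearch
+
+    es = AsyncElasticsearch()
+    # List running reindex tasks with per-task details, grouped by parent task.
+    tasks = await es.tasks.list(actions="*reindex", detailed=True, group_by="parents")
+    # Cancel all matching tasks; pass task_id="node_id:task_number" to cancel one.
+    await es.tasks.cancel(actions="*reindex")
+    await es.close()
+    return tasks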
diff --git a/elasticsearch_7/_async/client/transform.py b/elasticsearch_7/_async/client/transform.py
new file mode 100644
index 0000000000000000000000000000000000000000..adba187e0440f2a62279338567f9457761306810
--- /dev/null
+++ b/elasticsearch_7/_async/client/transform.py
@@ -0,0 +1,208 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class TransformClient(NamespacedClient):
+    @query_params("force")
+    async def delete_transform(self, transform_id, params=None, headers=None):
+        """
+        Deletes an existing transform.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/delete-transform.html>`_
+
+        :arg transform_id: The id of the transform to delete
+        :arg force: When `true`, the transform is deleted regardless of
+            its current state. The default value is `false`, meaning that the
+            transform must be `stopped` before it can be deleted.
+        """
+        if transform_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for a required argument 'transform_id'."
+            )
+
+        return await self.transport.perform_request(
+            "DELETE",
+            _make_path("_transform", transform_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("allow_no_match", "from_", "size")
+    async def get_transform(self, transform_id=None, params=None, headers=None):
+        """
+        Retrieves configuration information for transforms.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/get-transform.html>`_
+
+        :arg transform_id: The id or comma-delimited list of id
+            expressions of the transforms to get; '_all' or '*' implies all
+            transforms
+        :arg allow_no_match: Whether to ignore if a wildcard expression
+            matches no transforms. (This includes `_all` string or when no
+            transforms have been specified)
+        :arg from_: skips a number of transform configs, defaults to 0
+        :arg size: specifies a max number of transforms to get, defaults
+            to 100
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_transform", transform_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("allow_no_match", "from_", "size")
+    async def get_transform_stats(self, transform_id, params=None, headers=None):
+        """
+        Retrieves usage information for transforms.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/get-transform-stats.html>`_
+
+        :arg transform_id: The id of the transform for which to get
+            stats. '_all' or '*' implies all transforms
+        :arg allow_no_match: Whether to ignore if a wildcard expression
+            matches no transforms. (This includes `_all` string or when no
+            transforms have been specified)
+        :arg from_: skips a number of transform stats, defaults to 0
+        :arg size: specifies a max number of transform stats to get,
+            defaults to 100
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        if transform_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for a required argument 'transform_id'."
+            )
+
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_transform", transform_id, "_stats"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    async def preview_transform(self, body, params=None, headers=None):
+        """
+        Previews a transform.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/preview-transform.html>`_
+
+        :arg body: The definition for the transform to preview
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return await self.transport.perform_request(
+            "POST", "/_transform/_preview", params=params, headers=headers, body=body
+        )
+
+    @query_params("defer_validation")
+    async def put_transform(self, transform_id, body, params=None, headers=None):
+        """
+        Instantiates a transform.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/put-transform.html>`_
+
+        :arg transform_id: The id of the new transform.
+        :arg body: The transform definition
+        :arg defer_validation: If validations should be deferred until
+            transform starts, defaults to false.
+        """
+        for param in (transform_id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_transform", transform_id),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("timeout")
+    async def start_transform(self, transform_id, params=None, headers=None):
+        """
+        Starts one or more transforms.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/start-transform.html>`_
+
+        :arg transform_id: The id of the transform to start
+        :arg timeout: Controls the time to wait for the transform to
+            start
+        """
+        if transform_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for a required argument 'transform_id'."
+            )
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_transform", transform_id, "_start"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "allow_no_match",
+        "force",
+        "timeout",
+        "wait_for_checkpoint",
+        "wait_for_completion",
+    )
+    async def stop_transform(self, transform_id, params=None, headers=None):
+        """
+        Stops one or more transforms.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/stop-transform.html>`_
+
+        :arg transform_id: The id of the transform to stop
+        :arg allow_no_match: Whether to ignore if a wildcard expression
+            matches no transforms. (This includes `_all` string or when no
+            transforms have been specified)
+        :arg force: Whether to force stop a failed transform.
+            Defaults to false
+        :arg timeout: Controls the time to wait until the transform has
+            stopped. Defaults to 30 seconds
+        :arg wait_for_checkpoint: Whether to wait for the transform to
+            reach a checkpoint before stopping. Defaults to false
+        :arg wait_for_completion: Whether to wait for the transform to
+            fully stop before returning. Defaults to false
+        """
+        if transform_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for a required argument 'transform_id'."
+            )
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_transform", transform_id, "_stop"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("defer_validation")
+    async def update_transform(self, transform_id, body, params=None, headers=None):
+        """
+        Updates certain properties of a transform.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/update-transform.html>`_
+
+        :arg transform_id: The id of the transform.
+        :arg body: The update transform definition
+        :arg defer_validation: If validations should be deferred until
+            transform starts, defaults to false.
+        """
+        for param in (transform_id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return await self.transport.perform_request(
+            "POST",
+            _make_path("_transform", transform_id, "_update"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
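+
+
+# A hedged lifecycle sketch for the transform APIs above (not part of the
+# generated client). The transform id, indices and pivot definition are
+# illustrative assumptions; see the put-transform reference for the body schema.
+async def _example_transform_lifecycle():
+    from elasticsearch_7 import AsyncElasticsearch
+
+    es = AsyncElasticsearch()
+    body = {
+        "source": {"index": "orders"},
+        "dest": {"index": "orders_by_customer"},
+        "pivot": {
+            "group_by": {"customer": {"terms": {"field": "customer_id"}}},
+            "aggregations": {"total_spend": {"sum": {"field": "price"}}},
+        },
+    }
+    await es.transform.put_transform(transform_id="orders-summary", body=body)
+    await es.transform.start_transform(transform_id="orders-summary")
+    stats = await es.transform.get_transform_stats(transform_id="orders-summary")
+    await es.transform.stop_transform(
+        transform_id="orders-summary", wait_for_completion=True
+    )
+    await es.close()
+    return stats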
diff --git a/elasticsearch_7/_async/client/utils.py b/elasticsearch_7/_async/client/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..f28944f3c5b2b150ef0d220f7affdc22b5dea8e4
--- /dev/null
+++ b/elasticsearch_7/_async/client/utils.py
@@ -0,0 +1,13 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from ...client.utils import (  # noqa
+    _make_path,
+    _normalize_hosts,
+    _escape,
+    _bulk_body,
+    query_params,
+    SKIP_IN_PATH,
+    NamespacedClient,
+)
diff --git a/elasticsearch_7/_async/client/watcher.py b/elasticsearch_7/_async/client/watcher.py
new file mode 100644
index 0000000000000000000000000000000000000000..f85ef82c0406e281faf71355b12a1a3e9d253086
--- /dev/null
+++ b/elasticsearch_7/_async/client/watcher.py
@@ -0,0 +1,180 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class WatcherClient(NamespacedClient):
+    @query_params()
+    async def ack_watch(self, watch_id, action_id=None, params=None, headers=None):
+        """
+        Acknowledges a watch, manually throttling the execution of the watch's actions.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/watcher-api-ack-watch.html>`_
+
+        :arg watch_id: Watch ID
+        :arg action_id: A comma-separated list of the action ids to be
+            acked
+        """
+        if watch_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'watch_id'.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_watcher", "watch", watch_id, "_ack", action_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    async def activate_watch(self, watch_id, params=None, headers=None):
+        """
+        Activates a currently inactive watch.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/watcher-api-activate-watch.html>`_
+
+        :arg watch_id: Watch ID
+        """
+        if watch_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'watch_id'.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_watcher", "watch", watch_id, "_activate"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    async def deactivate_watch(self, watch_id, params=None, headers=None):
+        """
+        Deactivates a currently active watch.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/watcher-api-deactivate-watch.html>`_
+
+        :arg watch_id: Watch ID
+        """
+        if watch_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'watch_id'.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_watcher", "watch", watch_id, "_deactivate"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    async def delete_watch(self, id, params=None, headers=None):
+        """
+        Removes a watch from Watcher.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/watcher-api-delete-watch.html>`_
+
+        :arg id: Watch ID
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'id'.")
+
+        return await self.transport.perform_request(
+            "DELETE",
+            _make_path("_watcher", "watch", id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("debug")
+    async def execute_watch(self, body=None, id=None, params=None, headers=None):
+        """
+        Forces the execution of a stored watch.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/watcher-api-execute-watch.html>`_
+
+        :arg body: Execution control
+        :arg id: Watch ID
+        :arg debug: indicates whether the watch should execute in debug
+            mode
+        """
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_watcher", "watch", id, "_execute"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    async def get_watch(self, id, params=None, headers=None):
+        """
+        Retrieves a watch by its ID.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/watcher-api-get-watch.html>`_
+
+        :arg id: Watch ID
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'id'.")
+
+        return await self.transport.perform_request(
+            "GET", _make_path("_watcher", "watch", id), params=params, headers=headers
+        )
+
+    @query_params("active", "if_primary_term", "if_seq_no", "version")
+    async def put_watch(self, id, body=None, params=None, headers=None):
+        """
+        Creates a new watch, or updates an existing one.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/watcher-api-put-watch.html>`_
+
+        :arg id: Watch ID
+        :arg body: The watch
+        :arg active: Specify whether the watch is active or inactive by default
+        :arg if_primary_term: only update the watch if the last
+            operation that has changed the watch has the specified primary term
+        :arg if_seq_no: only update the watch if the last operation that
+            has changed the watch has the specified sequence number
+        :arg version: Explicit version number for concurrency control
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'id'.")
+
+        return await self.transport.perform_request(
+            "PUT",
+            _make_path("_watcher", "watch", id),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    async def start(self, params=None, headers=None):
+        """
+        Starts Watcher if it is not already running.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/watcher-api-start.html>`_
+        """
+        return await self.transport.perform_request(
+            "POST", "/_watcher/_start", params=params, headers=headers
+        )
+
+    @query_params("emit_stacktraces")
+    async def stats(self, metric=None, params=None, headers=None):
+        """
+        Retrieves the current Watcher metrics.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/watcher-api-stats.html>`_
+
+        :arg metric: Controls what additional stat metrics should be
+            included in the response  Valid choices: _all, queued_watches,
+            current_watches, pending_watches
+        :arg emit_stacktraces: Emits stack traces of currently running
+            watches
+        """
+        return await self.transport.perform_request(
+            "GET",
+            _make_path("_watcher", "stats", metric),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    async def stop(self, params=None, headers=None):
+        """
+        Stops Watcher if it is running.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/watcher-api-stop.html>`_
+        """
+        return await self.transport.perform_request(
+            "POST", "/_watcher/_stop", params=params, headers=headers
+        )
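+
+
+# A hedged usage sketch for the watcher APIs above (not part of the generated
+# client). The watch id and definition are illustrative assumptions; see the
+# put-watch reference for the full schema.
+async def _example_watcher_usage():
+    from elasticsearch_7 import AsyncElasticsearch
+
+    es = AsyncElasticsearch()
+    watch = {
+        "trigger": {"schedule": {"interval": "10m"}},
+        "input": {
+            "search": {
+                "request": {
+                    "indices": ["logs"],
+                    "body": {"query": {"match": {"level": "error"}}},
+                }
+            }
+        },
+        "condition": {"compare": {"ctx.payload.hits.total": {"gt": 0}}},
+        "actions": {"log_errors": {"logging": {"text": "errors were found"}}},
+    }
+    await es.watcher.put_watch(id="error-watch", body=watch, active=True)
+    # Run the watch once immediately, then acknowledge its actions.
+    await es.watcher.execute_watch(id="error-watch")
+    await es.watcher.ack_watch(watch_id="error-watch")
+    await es.close()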
diff --git a/elasticsearch_7/_async/client/xpack.py b/elasticsearch_7/_async/client/xpack.py
new file mode 100644
index 0000000000000000000000000000000000000000..0725ab2846821045fc4dcc09e57e9110a17368f7
--- /dev/null
+++ b/elasticsearch_7/_async/client/xpack.py
@@ -0,0 +1,36 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params
+
+
+class XPackClient(NamespacedClient):
+    def __getattr__(self, attr_name):
+        return getattr(self.client, attr_name)
+
+    # AUTO-GENERATED-API-DEFINITIONS #
+    @query_params("categories")
+    async def info(self, params=None, headers=None):
+        """
+        Retrieves information about the installed X-Pack features.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/info-api.html>`_
+
+        :arg categories: Comma-separated list of info categories. Can be
+            any of: build, license, features
+        """
+        return await self.transport.perform_request(
+            "GET", "/_xpack", params=params, headers=headers
+        )
+
+    @query_params("master_timeout")
+    async def usage(self, params=None, headers=None):
+        """
+        Retrieves usage information about the installed X-Pack features.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/usage-api.html>`_
+
+        :arg master_timeout: Specify timeout for connection to master node
+        """
+        return await self.transport.perform_request(
+            "GET", "/_xpack/usage", params=params, headers=headers
+        )
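+
+
+# A hedged usage sketch for the X-Pack info/usage APIs above (not part of the
+# generated client).
+async def _example_xpack_overview():
+    from elasticsearch_7 import AsyncElasticsearch
+
+    es = AsyncElasticsearch()
+    # Which X-Pack features are available and enabled on this cluster.
+    info = await es.xpack.info(categories="build,features")
+    # Per-feature usage statistics.
+    usage = await es.xpack.usage()
+    await es.close()
+    return info, usage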
diff --git a/elasticsearch_7/_async/compat.py b/elasticsearch_7/_async/compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..9de896073ca10b6fa2c4f77682e19935a49946e0
--- /dev/null
+++ b/elasticsearch_7/_async/compat.py
@@ -0,0 +1,24 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+import asyncio
+from ..compat import *  # noqa
+
+# Hack supporting Python 3.6 asyncio which didn't have 'get_running_loop()'.
+# Essentially we want to get away from having users pass in a loop to us.
+# Instead we should call 'get_running_loop()' whenever we need
+# the currently running loop.
+# See: https://aiopg.readthedocs.io/en/stable/run_loop.html#implementation
+try:
+    from asyncio import get_running_loop
+except ImportError:
+
+    def get_running_loop():
+        loop = asyncio.get_event_loop()
+        if not loop.is_running():
+            raise RuntimeError("no running event loop")
+        return loop
+
+
+__all__ = ["get_running_loop"]
diff --git a/elasticsearch_7/_async/helpers.py b/elasticsearch_7/_async/helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5ab9214e1dd07c77053f921c0e6f23d6ccb29b3
--- /dev/null
+++ b/elasticsearch_7/_async/helpers.py
@@ -0,0 +1,412 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+import asyncio
+
+from ..exceptions import TransportError
+from ..compat import map
+
+from ..helpers.actions import (
+    _ActionChunker,
+    _process_bulk_chunk_error,
+    _process_bulk_chunk_success,
+    expand_action,
+)
+from ..helpers.errors import ScanError
+
+import logging
+
+
+logger = logging.getLogger("elasticsearch.helpers")
+
+
+async def _chunk_actions(actions, chunk_size, max_chunk_bytes, serializer):
+    """
+    Split actions into chunks by number or size, serialize them into strings in
+    the process.
+    """
+    chunker = _ActionChunker(
+        chunk_size=chunk_size, max_chunk_bytes=max_chunk_bytes, serializer=serializer
+    )
+    async for action, data in actions:
+        ret = chunker.feed(action, data)
+        if ret:
+            yield ret
+    ret = chunker.flush()
+    if ret:
+        yield ret
+
+
+async def _process_bulk_chunk(
+    client,
+    bulk_actions,
+    bulk_data,
+    raise_on_exception=True,
+    raise_on_error=True,
+    *args,
+    **kwargs
+):
+    """
+    Send a bulk request to elasticsearch and process the output.
+    """
+    try:
+        # send the actual request
+        resp = await client.bulk("\n".join(bulk_actions) + "\n", *args, **kwargs)
+    except TransportError as e:
+        gen = _process_bulk_chunk_error(
+            error=e,
+            bulk_data=bulk_data,
+            raise_on_exception=raise_on_exception,
+            raise_on_error=raise_on_error,
+        )
+    else:
+        gen = _process_bulk_chunk_success(
+            resp=resp, bulk_data=bulk_data, raise_on_error=raise_on_error
+        )
+    for item in gen:
+        yield item
+
+
+def aiter(x):
+    """Turns an async iterable or iterable into an async iterator"""
+    if hasattr(x, "__anext__"):
+        return x
+    elif hasattr(x, "__aiter__"):
+        return x.__aiter__()
+
+    async def f():
+        for item in x:
+            yield item
+
+    return f().__aiter__()
+
+
+async def azip(*iterables):
+    """Zips async iterables and iterables into an async iterator
+    with the same behavior as zip()
+    """
+    aiters = [aiter(x) for x in iterables]
+    try:
+        while True:
+            yield tuple([await x.__anext__() for x in aiters])
+    except StopAsyncIteration:
+        pass
+
+
+async def async_streaming_bulk(
+    client,
+    actions,
+    chunk_size=500,
+    max_chunk_bytes=100 * 1024 * 1024,
+    raise_on_error=True,
+    expand_action_callback=expand_action,
+    raise_on_exception=True,
+    max_retries=0,
+    initial_backoff=2,
+    max_backoff=600,
+    yield_ok=True,
+    *args,
+    **kwargs
+):
+
+    """
+    Streaming bulk consumes actions from the iterable passed in and yields
+    results per action. For non-streaming use cases use
+    :func:`~elasticsearch.helpers.async_bulk` which is a wrapper around streaming
+    bulk that returns summary information about the bulk operation once the
+    entire input is consumed and sent.
+
+    If you specify ``max_retries`` it will also retry any documents that were
+    rejected with a ``429`` status code. To do this it will wait (**by calling
+    asyncio.sleep**) for ``initial_backoff`` seconds and then, on every
+    subsequent rejection of the same chunk, wait double the previous delay,
+    up to ``max_backoff`` seconds.
+
+    :arg client: instance of :class:`~elasticsearch.AsyncElasticsearch` to use
+    :arg actions: iterable or async iterable containing the actions to be executed
+    :arg chunk_size: number of docs in one chunk sent to es (default: 500)
+    :arg max_chunk_bytes: the maximum size of the request in bytes (default: 100MB)
+    :arg raise_on_error: raise ``BulkIndexError`` containing errors (as `.errors`)
+        from the execution of the last chunk when some occur. By default we raise.
+    :arg raise_on_exception: if ``False`` then don't propagate exceptions from
+        call to ``bulk`` and just report the items that failed as failed.
+    :arg expand_action_callback: callback executed on each action passed in,
+        should return a tuple containing the action line and the data line
+        (`None` if data line should be omitted).
+    :arg max_retries: maximum number of times a document will be retried when
+        ``429`` is received, set to 0 (default) for no retries on ``429``
+    :arg initial_backoff: number of seconds we should wait before the first
+        retry. Any subsequent retries will wait ``initial_backoff *
+        2**retry_number`` seconds
+    :arg max_backoff: maximum number of seconds a retry will wait
+    :arg yield_ok: if set to False will skip successful documents in the output
+    """
+
+    async def map_actions():
+        async for item in aiter(actions):
+            yield expand_action_callback(item)
+
+    async for bulk_data, bulk_actions in _chunk_actions(
+        map_actions(), chunk_size, max_chunk_bytes, client.transport.serializer
+    ):
+
+        for attempt in range(max_retries + 1):
+            to_retry, to_retry_data = [], []
+            if attempt:
+                await asyncio.sleep(
+                    min(max_backoff, initial_backoff * 2 ** (attempt - 1))
+                )
+
+            try:
+                async for data, (ok, info) in azip(
+                    bulk_data,
+                    _process_bulk_chunk(
+                        client,
+                        bulk_actions,
+                        bulk_data,
+                        raise_on_exception,
+                        raise_on_error,
+                        *args,
+                        **kwargs
+                    ),
+                ):
+
+                    if not ok:
+                        action, info = info.popitem()
+                        # retry if retries enabled, we get 429, and we are not
+                        # in the last attempt
+                        if (
+                            max_retries
+                            and info["status"] == 429
+                            and (attempt + 1) <= max_retries
+                        ):
+                            # _process_bulk_chunk expects strings so we need to
+                            # re-serialize the data
+                            to_retry.extend(
+                                map(client.transport.serializer.dumps, data)
+                            )
+                            to_retry_data.append(data)
+                        else:
+                            yield ok, {action: info}
+                    elif yield_ok:
+                        yield ok, info
+
+            except TransportError as e:
+                # suppress 429 errors since we will retry them
+                if attempt == max_retries or e.status_code != 429:
+                    raise
+            else:
+                if not to_retry:
+                    break
+                # retry only subset of documents that didn't succeed
+                bulk_actions, bulk_data = to_retry, to_retry_data
+
+
+async def async_bulk(client, actions, stats_only=False, *args, **kwargs):
+    """
+    Helper for the :meth:`~elasticsearch.AsyncElasticsearch.bulk` api that provides
+    a more human friendly interface - it consumes an iterator of actions and
+    sends them to elasticsearch in chunks. It returns a tuple with summary
+    information - number of successfully executed actions and either list of
+    errors or number of errors if ``stats_only`` is set to ``True``. Note that
+    by default we raise a ``BulkIndexError`` when we encounter an error so
+    options like ``stats_only`` only apply when ``raise_on_error`` is set to
+    ``False``.
+
+    When errors are being collected the original document data is included in
+    the error dictionary, which can lead to very high memory usage. If you need
+    to process a lot of data and want to ignore/collect errors please consider
+    using the :func:`~elasticsearch.helpers.async_streaming_bulk` helper which will
+    just return the errors and not store them in memory.
+
+
+    :arg client: instance of :class:`~elasticsearch.AsyncElasticsearch` to use
+    :arg actions: iterator containing the actions
+    :arg stats_only: if `True` only report the number of successful/failed
+        operations instead of the number of successful operations and a list of
+        error responses
+
+    Any additional keyword arguments will be passed to
+    :func:`~elasticsearch.helpers.async_streaming_bulk` which is used to execute
+    the operation, see :func:`~elasticsearch.helpers.async_streaming_bulk` for more
+    accepted parameters.
+    """
+    success, failed = 0, 0
+
+    # list of errors to be collected if not stats_only
+    errors = []
+
+    # make streaming_bulk yield successful results so we can count them
+    kwargs["yield_ok"] = True
+    async for ok, item in async_streaming_bulk(client, actions, *args, **kwargs):
+        # go through request-response pairs and detect failures
+        if not ok:
+            if not stats_only:
+                errors.append(item)
+            failed += 1
+        else:
+            success += 1
+
+    return success, failed if stats_only else errors
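+
+
+# A hedged usage sketch for async_streaming_bulk()/async_bulk() above (not part
+# of the helpers API). The index name and generated documents are illustrative
+# assumptions.
+async def _example_async_bulk():
+    from elasticsearch_7 import AsyncElasticsearch
+
+    es = AsyncElasticsearch()
+
+    def gen_actions():
+        # Plain iterables are accepted; async iterables work as well.
+        for i in range(1000):
+            yield {"_index": "demo", "_id": i, "value": i}
+
+    # Summary form: count of successes plus a list of per-document errors.
+    success, errors = await async_bulk(es, gen_actions())
+
+    # Streaming form, retrying chunks rejected with HTTP 429 up to three times.
+    async for ok, item in async_streaming_bulk(es, gen_actions(), max_retries=3):
+        if not ok:
+            logger.warning("failed to index: %s", item)
+    await es.close()
+    return success, errors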
+
+
+async def async_scan(
+    client,
+    query=None,
+    scroll="5m",
+    raise_on_error=True,
+    preserve_order=False,
+    size=1000,
+    request_timeout=None,
+    clear_scroll=True,
+    scroll_kwargs=None,
+    **kwargs
+):
+    """
+    Simple abstraction on top of the
+    :meth:`~elasticsearch.AsyncElasticsearch.scroll` api - a simple iterator that
+    yields all hits as returned by the underlying scroll requests.
+
+    By default scan does not return results in any pre-determined order. To
+    have a standard order in the returned documents (either by score or
+    explicit sort definition) when scrolling, use ``preserve_order=True``. This
+    may be an expensive operation and will negate the performance benefits of
+    using ``scan``.
+
+    :arg client: instance of :class:`~elasticsearch.AsyncElasticsearch` to use
+    :arg query: body for the :meth:`~elasticsearch.AsyncElasticsearch.search` api
+    :arg scroll: Specify how long a consistent view of the index should be
+        maintained for scrolled search
+    :arg raise_on_error: raises an exception (``ScanError``) if an error is
+        encountered (some shards fail to execute). By default we raise.
+    :arg preserve_order: don't set the ``search_type`` to ``scan`` - this will
+        cause the scroll to paginate while preserving the order. Note that this
+        can be an extremely expensive operation and can easily lead to
+        unpredictable results, use with caution.
+    :arg size: size (per shard) of the batch sent at each iteration.
+    :arg request_timeout: explicit timeout for each call to ``scan``
+    :arg clear_scroll: explicitly calls delete on the scroll id via the clear
+        scroll API at the end of the method on completion or error, defaults
+        to true.
+    :arg scroll_kwargs: additional kwargs to be passed to
+        :meth:`~elasticsearch.AsyncElasticsearch.scroll`
+
+    Any additional keyword arguments will be passed to the initial
+    :meth:`~elasticsearch.AsyncElasticsearch.search` call::
+
+        async_scan(es,
+            query={"query": {"match": {"title": "python"}}},
+            index="orders-*",
+            doc_type="books"
+        )
+
+    """
+    scroll_kwargs = scroll_kwargs or {}
+
+    if not preserve_order:
+        query = query.copy() if query else {}
+        query["sort"] = "_doc"
+
+    # initial search
+    resp = await client.search(
+        body=query, scroll=scroll, size=size, request_timeout=request_timeout, **kwargs
+    )
+    scroll_id = resp.get("_scroll_id")
+
+    try:
+        while scroll_id and resp["hits"]["hits"]:
+            for hit in resp["hits"]["hits"]:
+                yield hit
+
+            # check if we have any errors
+            if (resp["_shards"]["successful"] + resp["_shards"]["skipped"]) < resp[
+                "_shards"
+            ]["total"]:
+                logger.warning(
+                    "Scroll request has only succeeded on %d (+%d skipped) shards out of %d.",
+                    resp["_shards"]["successful"],
+                    resp["_shards"]["skipped"],
+                    resp["_shards"]["total"],
+                )
+                if raise_on_error:
+                    raise ScanError(
+                        scroll_id,
+                        "Scroll request has only succeeded on %d (+%d skiped) shards out of %d."
+                        % (
+                            resp["_shards"]["successful"],
+                            resp["_shards"]["skipped"],
+                            resp["_shards"]["total"],
+                        ),
+                    )
+            resp = await client.scroll(
+                body={"scroll_id": scroll_id, "scroll": scroll}, **scroll_kwargs
+            )
+            scroll_id = resp.get("_scroll_id")
+
+    finally:
+        if scroll_id and clear_scroll:
+            await client.clear_scroll(body={"scroll_id": [scroll_id]}, ignore=(404,))
+
+
+async def async_reindex(
+    client,
+    source_index,
+    target_index,
+    query=None,
+    target_client=None,
+    chunk_size=500,
+    scroll="5m",
+    scan_kwargs={},
+    bulk_kwargs={},
+):
+
+    """
+    Reindex all documents from one index that satisfy a given query
+    to another, potentially (if `target_client` is specified) on a different cluster.
+    If you don't specify the query you will reindex all the documents.
+
+    Since ``2.3`` a :meth:`~elasticsearch.AsyncElasticsearch.reindex` api is
+    available as part of elasticsearch itself. It is recommended to use the api
+    instead of this helper wherever possible. The helper is here mostly for
+    backwards compatibility and for situations where more flexibility is
+    needed.
+
+    .. note::
+
+        This helper doesn't transfer mappings, just the data.
+
+    :arg client: instance of :class:`~elasticsearch.AsyncElasticsearch` to use (for
+        read if `target_client` is specified as well)
+    :arg source_index: index (or list of indices) to read documents from
+    :arg target_index: name of the index in the target cluster to populate
+    :arg query: body for the :meth:`~elasticsearch.AsyncElasticsearch.search` api
+    :arg target_client: optional, if specified will be used for writing (thus
+        enabling reindex between clusters)
+    :arg chunk_size: number of docs in one chunk sent to es (default: 500)
+    :arg scroll: Specify how long a consistent view of the index should be
+        maintained for scrolled search
+    :arg scan_kwargs: additional kwargs to be passed to
+        :func:`~elasticsearch.helpers.async_scan`
+    :arg bulk_kwargs: additional kwargs to be passed to
+        :func:`~elasticsearch.helpers.async_bulk`
+    """
+    target_client = client if target_client is None else target_client
+    docs = async_scan(
+        client, query=query, index=source_index, scroll=scroll, **scan_kwargs
+    )
+
+    async def _change_doc_index(hits, index):
+        async for h in hits:
+            h["_index"] = index
+            if "fields" in h:
+                h.update(h.pop("fields"))
+            yield h
+
+    kwargs = {"stats_only": True}
+    kwargs.update(bulk_kwargs)
+    return await async_bulk(
+        target_client,
+        _change_doc_index(docs, target_index),
+        chunk_size=chunk_size,
+        **kwargs
+    )
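+
+
+# A hedged usage sketch for async_scan()/async_reindex() above (not part of the
+# helpers API). The hosts, index names and query are illustrative assumptions;
+# omit target_client to reindex within a single cluster.
+async def _example_async_reindex():
+    from elasticsearch_7 import AsyncElasticsearch
+
+    source = AsyncElasticsearch(hosts=["http://localhost:9200"])
+    target = AsyncElasticsearch(hosts=["http://localhost:9201"])
+    # Copy matching documents between clusters; note that mappings are not copied.
+    await async_reindex(
+        source,
+        source_index="orders",
+        target_index="orders-archive",
+        query={"query": {"range": {"created": {"lt": "now-90d"}}}},
+        target_client=target,
+    )
+    await source.close()
+    await target.close()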
diff --git a/elasticsearch_7/_async/http_aiohttp.py b/elasticsearch_7/_async/http_aiohttp.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d34be8e010829105949a41d789b9d1964246ea1
--- /dev/null
+++ b/elasticsearch_7/_async/http_aiohttp.py
@@ -0,0 +1,311 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+import asyncio
+import ssl
+import os
+import urllib3
+import warnings
+
+import aiohttp
+import yarl
+from aiohttp.client_exceptions import ServerFingerprintMismatch, ServerTimeoutError
+
+from .compat import get_running_loop
+from ..connection import Connection
+from ..compat import urlencode
+from ..exceptions import (
+    ConnectionError,
+    ConnectionTimeout,
+    ImproperlyConfigured,
+    SSLError,
+)
+
+
+# sentinel value for `verify_certs`.
+# This is used to detect if a user is passing in a value
+# for SSL kwargs if also using an SSLContext.
+VERIFY_CERTS_DEFAULT = object()
+SSL_SHOW_WARN_DEFAULT = object()
+
+CA_CERTS = None
+
+try:
+    import certifi
+
+    CA_CERTS = certifi.where()
+except ImportError:
+    pass
+
+
+class AIOHttpConnection(Connection):
+    def __init__(
+        self,
+        host="localhost",
+        port=None,
+        http_auth=None,
+        use_ssl=False,
+        verify_certs=VERIFY_CERTS_DEFAULT,
+        ssl_show_warn=SSL_SHOW_WARN_DEFAULT,
+        ca_certs=None,
+        client_cert=None,
+        client_key=None,
+        ssl_version=None,
+        ssl_assert_fingerprint=None,
+        maxsize=10,
+        headers=None,
+        ssl_context=None,
+        http_compress=None,
+        cloud_id=None,
+        api_key=None,
+        opaque_id=None,
+        loop=None,
+        **kwargs,
+    ):
+        """
+        Default connection class for ``AsyncElasticsearch`` using the `aiohttp` library and the http protocol.
+
+        :arg host: hostname of the node (default: localhost)
+        :arg port: port to use (integer, default: 9200)
+        :arg timeout: default timeout in seconds (float, default: 10)
+        :arg http_auth: optional http auth information as either ':' separated
+            string or a tuple
+        :arg use_ssl: use ssl for the connection if `True`
+        :arg verify_certs: whether to verify SSL certificates
+        :arg ssl_show_warn: show warning when verify certs is disabled
+        :arg ca_certs: optional path to CA bundle.
+            See https://urllib3.readthedocs.io/en/latest/security.html#using-certifi-with-urllib3
+            for instructions on how to get the default set
+        :arg client_cert: path to the file containing the private key and the
+            certificate, or cert only if using client_key
+        :arg client_key: path to the file containing the private key if using
+            separate cert and key files (client_cert will contain only the cert)
+        :arg ssl_version: version of the SSL protocol to use. Choices are:
+            SSLv23 (default) SSLv2 SSLv3 TLSv1 (see ``PROTOCOL_*`` constants in the
+            ``ssl`` module for exact options for your environment).
+        :arg ssl_assert_hostname: use hostname verification if not `False`
+        :arg ssl_assert_fingerprint: verify the supplied certificate fingerprint if not `None`
+        :arg maxsize: the number of connections which will be kept open to this
+            host. See https://urllib3.readthedocs.io/en/1.4/pools.html#api for more
+            information.
+        :arg headers: any custom http headers to be added to requests
+        :arg http_compress: Use gzip compression
+        :arg cloud_id: The Cloud ID from ElasticCloud. Convenient way to connect to cloud instances.
+            Other host connection params will be ignored.
+        :arg api_key: optional API Key authentication as either base64 encoded string or a tuple.
+        :arg opaque_id: Send this value in the 'X-Opaque-Id' HTTP header
+            for tracing all requests made by this transport.
+        :arg loop: asyncio Event Loop to use with aiohttp. This is set by default to the currently running loop.
+        """
+
+        self.headers = {}
+
+        super().__init__(
+            host=host,
+            port=port,
+            use_ssl=use_ssl,
+            headers=headers,
+            http_compress=http_compress,
+            cloud_id=cloud_id,
+            api_key=api_key,
+            opaque_id=opaque_id,
+            **kwargs,
+        )
+
+        if http_auth is not None:
+            if isinstance(http_auth, (tuple, list)):
+                http_auth = ":".join(http_auth)
+            self.headers.update(urllib3.make_headers(basic_auth=http_auth))
+
+        # if providing an SSL context, raise error if any other SSL related flag is used
+        if ssl_context and (
+            (verify_certs is not VERIFY_CERTS_DEFAULT)
+            or (ssl_show_warn is not SSL_SHOW_WARN_DEFAULT)
+            or ca_certs
+            or client_cert
+            or client_key
+            or ssl_version
+        ):
+            warnings.warn(
+                "When using `ssl_context`, all other SSL related kwargs are ignored"
+            )
+
+        self.ssl_assert_fingerprint = ssl_assert_fingerprint
+        if self.use_ssl and ssl_context is None:
+            ssl_context = ssl.SSLContext(ssl_version or ssl.PROTOCOL_TLS)
+
+            # Convert all sentinel values to their actual default
+            # values if not using an SSLContext.
+            if verify_certs is VERIFY_CERTS_DEFAULT:
+                verify_certs = True
+            if ssl_show_warn is SSL_SHOW_WARN_DEFAULT:
+                ssl_show_warn = True
+
+            if verify_certs:
+                ssl_context.verify_mode = ssl.CERT_REQUIRED
+                ssl_context.check_hostname = True
+            else:
+                ssl_context.verify_mode = ssl.CERT_NONE
+                ssl_context.check_hostname = False
+
+            ca_certs = CA_CERTS if ca_certs is None else ca_certs
+            if verify_certs:
+                if not ca_certs:
+                    raise ImproperlyConfigured(
+                        "Root certificates are missing for certificate "
+                        "validation. Either pass them in using the ca_certs parameter or "
+                        "install certifi to use it automatically."
+                    )
+            else:
+                if ssl_show_warn:
+                    warnings.warn(
+                        "Connecting to %s using SSL with verify_certs=False is insecure."
+                        % self.host
+                    )
+
+            # Only try to load CA certificates when a path was resolved;
+            # ca_certs may be None when verify_certs=False and certifi is
+            # not installed, and os.path.isfile(None) would raise TypeError.
+            if ca_certs:
+                if os.path.isfile(ca_certs):
+                    ssl_context.load_verify_locations(cafile=ca_certs)
+                elif os.path.isdir(ca_certs):
+                    ssl_context.load_verify_locations(capath=ca_certs)
+                else:
+                    raise ImproperlyConfigured("ca_certs parameter is not a path")
+
+        self.headers.setdefault("connection", "keep-alive")
+        self.loop = loop
+        self.session = None
+
+        # Parameters for creating an aiohttp.ClientSession later.
+        self._limit = maxsize
+        self._http_auth = http_auth
+        self._ssl_context = ssl_context
+
+    async def perform_request(
+        self, method, url, params=None, body=None, timeout=None, ignore=(), headers=None
+    ):
+        if self.session is None:
+            await self._create_aiohttp_session()
+
+        orig_body = body
+        url_path = url
+        if params:
+            query_string = urlencode(params)
+        else:
+            query_string = ""
+
+        # There is a bug in aiohttp that disables the re-use
+        # of the connection in the pool when method=HEAD.
+        # See: aio-libs/aiohttp#1769
+        is_head = False
+        if method == "HEAD":
+            method = "GET"
+            is_head = True
+
+        # Provide correct URL object to avoid string parsing in low-level code
+        url = yarl.URL.build(
+            scheme=self.scheme,
+            host=self.hostname,
+            port=self.port,
+            path=url,
+            query_string=query_string,
+            encoded=True,
+        )
+
+        timeout = aiohttp.ClientTimeout(
+            total=timeout if timeout is not None else self.timeout
+        )
+
+        req_headers = self.headers.copy()
+        if headers:
+            req_headers.update(headers)
+
+        if self.http_compress and body:
+            body = self._gzip_compress(body)
+            req_headers["content-encoding"] = "gzip"
+
+        start = self.loop.time()
+        try:
+            async with self.session.request(
+                method,
+                url,
+                data=body,
+                headers=req_headers,
+                timeout=timeout,
+                fingerprint=self.ssl_assert_fingerprint,
+            ) as response:
+                if is_head:  # We actually called 'GET' so throw away the data.
+                    await response.release()
+                    raw_data = ""
+                else:
+                    raw_data = await response.text()
+                duration = self.loop.time() - start
+
+        # We want to reraise a cancellation.
+        except asyncio.CancelledError:
+            raise
+
+        except Exception as e:
+            self.log_request_fail(
+                method, url, url_path, orig_body, self.loop.time() - start, exception=e
+            )
+            if isinstance(e, ServerFingerprintMismatch):
+                raise SSLError("N/A", str(e), e)
+            if isinstance(e, (asyncio.TimeoutError, ServerTimeoutError)):
+                raise ConnectionTimeout("TIMEOUT", str(e), e)
+            raise ConnectionError("N/A", str(e), e)
+
+        # raise warnings if any from the 'Warnings' header.
+        warning_headers = response.headers.getall("warning", ())
+        self._raise_warnings(warning_headers)
+
+        # raise errors based on http status codes, let the client handle those if needed
+        if not (200 <= response.status < 300) and response.status not in ignore:
+            self.log_request_fail(
+                method,
+                url,
+                url_path,
+                orig_body,
+                duration,
+                status_code=response.status,
+                response=raw_data,
+            )
+            self._raise_error(response.status, raw_data)
+
+        self.log_request_success(
+            method, url, url_path, orig_body, response.status, raw_data, duration
+        )
+
+        return response.status, response.headers, raw_data
+
+    async def close(self):
+        """
+        Explicitly closes connection
+        """
+        if self.session:
+            await self.session.close()
+
+    async def _create_aiohttp_session(self):
+        """Creates an aiohttp.ClientSession(). This is delayed until
+        the first call to perform_request() so that AsyncTransport has
+        a chance to set AIOHttpConnection.loop
+        """
+        if self.loop is None:
+            self.loop = get_running_loop()
+        self.session = aiohttp.ClientSession(
+            headers=self.headers,
+            auto_decompress=True,
+            loop=self.loop,
+            cookie_jar=aiohttp.DummyCookieJar(),
+            response_class=ESClientResponse,
+            connector=aiohttp.TCPConnector(
+                limit=self._limit, use_dns_cache=True, ssl=self._ssl_context,
+            ),
+        )
+
+
+class ESClientResponse(aiohttp.ClientResponse):
+    async def text(self, encoding=None, errors="strict"):
+        if self._body is None:
+            await self.read()
+
+        return self._body.decode("utf-8", "surrogatepass")
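+
+
+# A hedged configuration sketch (not part of this module's API). The host URL,
+# credentials and certificate path are illustrative assumptions; these keyword
+# arguments are normally passed to AsyncElasticsearch, which forwards them to
+# AIOHttpConnection.
+def _example_secure_client():
+    from elasticsearch_7 import AsyncElasticsearch
+
+    return AsyncElasticsearch(
+        hosts=["https://localhost:9200"],
+        http_auth=("elastic", "changeme"),
+        use_ssl=True,
+        verify_certs=True,
+        ca_certs="/path/to/ca.pem",
+        http_compress=True,
+    )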
diff --git a/elasticsearch_7/_async/transport.py b/elasticsearch_7/_async/transport.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac5d0b2baafbed8d47f6c0c0ae1543acbc0f57d0
--- /dev/null
+++ b/elasticsearch_7/_async/transport.py
@@ -0,0 +1,331 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+import asyncio
+import logging
+from itertools import chain
+
+from .compat import get_running_loop
+from .http_aiohttp import AIOHttpConnection
+from ..transport import Transport
+from ..exceptions import (
+    TransportError,
+    ConnectionTimeout,
+    ConnectionError,
+    SerializationError,
+)
+
+
+logger = logging.getLogger("elasticsearch")
+
+
+class AsyncTransport(Transport):
+    """
+    Encapsulation of transport-related logic. Handles instantiation of the
+    individual connections as well as creating a connection pool to hold them.
+
+    Main interface is the `perform_request` method.
+    """
+
+    DEFAULT_CONNECTION_CLASS = AIOHttpConnection
+
+    def __init__(self, hosts, *args, sniff_on_start=False, **kwargs):
+        """
+        :arg hosts: list of dictionaries, each containing keyword arguments to
+            create a `connection_class` instance
+        :arg connection_class: subclass of :class:`~elasticsearch.Connection` to use
+        :arg connection_pool_class: subclass of :class:`~elasticsearch.ConnectionPool` to use
+        :arg host_info_callback: callback responsible for taking the node information from
+            `/_cluster/nodes`, along with already extracted information, and
+            producing a list of arguments (same as `hosts` parameter)
+        :arg sniff_on_start: flag indicating whether to obtain a list of nodes
+            from the cluster at startup time
+        :arg sniffer_timeout: number of seconds between automatic sniffs
+        :arg sniff_on_connection_fail: flag controlling if connection failure triggers a sniff
+        :arg sniff_timeout: timeout used for the sniff request - it should be a
+            fast api call and we are potentially talking to more nodes, so we want
+            to fail quickly. Not used during initial sniffing (if
+            ``sniff_on_start`` is on) when the connection still isn't
+            initialized.
+        :arg serializer: serializer instance
+        :arg serializers: optional dict of serializer instances that will be
+            used for deserializing data coming from the server. (key is the mimetype)
+        :arg default_mimetype: when no mimetype is specified by the server
+            response assume this mimetype, defaults to `'application/json'`
+        :arg max_retries: maximum number of retries before an exception is propagated
+        :arg retry_on_status: set of HTTP status codes on which we should retry
+            on a different node. defaults to ``(502, 503, 504)``
+        :arg retry_on_timeout: should timeout trigger a retry on different
+            node? (default `False`)
+        :arg send_get_body_as: for GET requests with body this option allows
+            you to specify an alternate way of execution for environments that
+            don't support passing bodies with GET requests. If you set this to
+            'POST' a POST method will be used instead, if to 'source' then the body
+            will be serialized and passed as a query parameter `source`.
+
+        Any extra keyword arguments will be passed to the `connection_class`
+        when creating an instance unless overridden by that connection's
+        options provided as part of the hosts parameter.
+        """
+        self.sniffing_task = None
+        self.loop = None
+        self._async_init_called = False
+
+        super(AsyncTransport, self).__init__(
+            *args, hosts=[], sniff_on_start=False, **kwargs
+        )
+
+        # Don't enable sniffing on Cloud instances.
+        if kwargs.get("cloud_id", False):
+            sniff_on_start = False
+
+        # Since we defer creating connections / sniffing until after
+        # the constructor, we never pass 'sniff_on_start' or a
+        # non-empty 'hosts' on to our parent's __init__().
+        self.hosts = hosts
+        self.sniff_on_start = sniff_on_start
+
+    async def _async_init(self):
+        """This is our stand-in for an async constructor. Everything
+        that was deferred within __init__() should be done here now.
+
+        This method will only be called once per AsyncTransport instance
+        and is called from one of AsyncElasticsearch.__aenter__(),
+        AsyncTransport.perform_request() or AsyncTransport.get_connection()
+        """
+        # Detect the async loop we're running in and set it
+        # on all already created HTTP connections.
+        self.loop = get_running_loop()
+        self.kwargs["loop"] = self.loop
+
+        # Now that we have a loop we can create all our HTTP connections
+        self.set_connections(self.hosts)
+        self.seed_connections = list(self.connection_pool.connections[:])
+
+        # ... and we can start sniffing in the background.
+        if self.sniffing_task is None and self.sniff_on_start:
+            self.last_sniff = self.loop.time()
+            self.create_sniff_task(initial=True)
+
+    async def _async_call(self):
+        """This method is called within any async method of AsyncTransport
+        where the transport is not closing. This will check to see if we should
+        call our _async_init() or create a new sniffing task
+        """
+        if not self._async_init_called:
+            self._async_init_called = True
+            await self._async_init()
+
+        if self.sniffer_timeout:
+            if self.loop.time() >= self.last_sniff + self.sniffer_timeout:
+                self.create_sniff_task()
+
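+    # Illustrative sketch (not part of the original module): the deferred-init
+    # pattern above means __init__() never touches an event loop; the first
+    # awaited call does the real setup. The host below is hypothetical.
+    #
+    #     es = AsyncElasticsearch(hosts=["http://localhost:9200"],
+    #                             sniff_on_start=True)
+    #     # nothing has connected yet; connections are created lazily
+    #     await es.info()  # -> _async_call() -> _async_init() -> sniff task
+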
+    async def _get_node_info(self, conn, initial):
+        try:
+            # use small timeout for the sniffing request, should be a fast api call
+            _, headers, node_info = await conn.perform_request(
+                "GET",
+                "/_nodes/_all/http",
+                timeout=self.sniff_timeout if not initial else None,
+            )
+            return self.deserializer.loads(node_info, headers.get("content-type"))
+        except Exception:
+            pass
+        return None
+
+    async def _get_sniff_data(self, initial=False):
+        previous_sniff = self.last_sniff
+
+        # reset last_sniff timestamp
+        self.last_sniff = self.loop.time()
+
+        # use small timeout for the sniffing request, should be a fast api call
+        timeout = self.sniff_timeout if not initial else None
+
+        def _sniff_request(conn):
+            return self.loop.create_task(
+                conn.perform_request("GET", "/_nodes/_all/http", timeout=timeout)
+            )
+
+        # Go through all current connections as well as the
+        # seed_connections for good measure
+        tasks = []
+        for conn in self.connection_pool.connections:
+            tasks.append(_sniff_request(conn))
+        for conn in self.seed_connections:
+            # Ensure that we don't have any duplication within seed_connections.
+            if conn in self.connection_pool.connections:
+                continue
+            tasks.append(_sniff_request(conn))
+
+        done = ()
+        try:
+            while tasks:
+                # execute sniff requests in parallel, wait for first to return
+                done, tasks = await asyncio.wait(
+                    tasks, return_when=asyncio.FIRST_COMPLETED, loop=self.loop
+                )
+                # go through all the finished tasks
+                for t in done:
+                    try:
+                        _, headers, node_info = t.result()
+                        node_info = self.deserializer.loads(
+                            node_info, headers.get("content-type")
+                        )
+                    except (ConnectionError, SerializationError):
+                        continue
+                    node_info = list(node_info["nodes"].values())
+                    return node_info
+            else:
+                # no task has finished completely
+                raise TransportError("N/A", "Unable to sniff hosts.")
+        except Exception:
+            # keep the previous value on error
+            self.last_sniff = previous_sniff
+            raise
+        finally:
+            # Cancel all the pending tasks
+            for task in chain(done, tasks):
+                task.cancel()
+
+    async def sniff_hosts(self, initial=False):
+        """Either spawns a sniffing_task which does regular sniffing
+        over time or does a single sniffing session and awaits the results.
+        """
+        # Without a loop we can't do anything.
+        if not self.loop:
+            return
+
+        node_info = await self._get_sniff_data(initial)
+        hosts = list(filter(None, (self._get_host_info(n) for n in node_info)))
+
+        # we weren't able to get any nodes, maybe using an incompatible
+        # transport_schema or host_info_callback blocked all - raise error.
+        if not hosts:
+            raise TransportError(
+                "N/A", "Unable to sniff hosts - no viable hosts found."
+            )
+
+        # remember current live connections
+        orig_connections = self.connection_pool.connections[:]
+        self.set_connections(hosts)
+        # close those connections that are not in use any more
+        for c in orig_connections:
+            if c not in self.connection_pool.connections:
+                await c.close()
+
+    def create_sniff_task(self, initial=False):
+        """
+        Initiate a sniffing task. Make sure we only have one sniff request
+        running at any given time. If a finished sniffing request is around,
+        collect its result (which can raise its exception).
+        """
+        if self.sniffing_task and self.sniffing_task.done():
+            try:
+                if self.sniffing_task is not None:
+                    self.sniffing_task.result()
+            finally:
+                self.sniffing_task = None
+
+        if self.sniffing_task is None:
+            self.sniffing_task = self.loop.create_task(self.sniff_hosts(initial))
+
+    def mark_dead(self, connection):
+        """
+        Mark a connection as dead (failed) in the connection pool. If sniffing
+        on failure is enabled this will initiate the sniffing process.
+
+        :arg connection: instance of :class:`~elasticsearch.Connection` that failed
+        """
+        self.connection_pool.mark_dead(connection)
+        if self.sniff_on_connection_fail:
+            self.create_sniff_task()
+
+    def get_connection(self):
+        return self.connection_pool.get_connection()
+
+    async def perform_request(self, method, url, headers=None, params=None, body=None):
+        """
+        Perform the actual request. Retrieve a connection from the connection
+        pool, pass all the information to its perform_request method and
+        return the data.
+
+        If an exception was raised, mark the connection as failed and retry (up
+        to `max_retries` times).
+
+        If the operation was successful and the connection used was previously
+        marked as dead, mark it as live, resetting its failure count.
+
+        :arg method: HTTP method to use
+        :arg url: absolute url (without host) to target
+        :arg headers: dictionary of headers, will be handed over to the
+            underlying :class:`~elasticsearch.Connection` class
+        :arg params: dictionary of query parameters, will be handed over to the
+            underlying :class:`~elasticsearch.Connection` class for serialization
+        :arg body: body of the request, will be serialized using serializer and
+            passed to the connection
+        """
+        await self._async_call()
+
+        method, params, body, ignore, timeout = self._resolve_request_args(
+            method, params, body
+        )
+
+        for attempt in range(self.max_retries + 1):
+            connection = self.get_connection()
+
+            try:
+                status, headers, data = await connection.perform_request(
+                    method,
+                    url,
+                    params,
+                    body,
+                    headers=headers,
+                    ignore=ignore,
+                    timeout=timeout,
+                )
+            except TransportError as e:
+                if method == "HEAD" and e.status_code == 404:
+                    return False
+
+                retry = False
+                if isinstance(e, ConnectionTimeout):
+                    retry = self.retry_on_timeout
+                elif isinstance(e, ConnectionError):
+                    retry = True
+                elif e.status_code in self.retry_on_status:
+                    retry = True
+
+                if retry:
+                    # only mark as dead if we are retrying
+                    self.mark_dead(connection)
+                    # raise exception on last retry
+                    if attempt == self.max_retries:
+                        raise
+                else:
+                    raise
+
+            else:
+                if method == "HEAD":
+                    return 200 <= status < 300
+
+                # connection didn't fail, confirm it's live status
+                self.connection_pool.mark_live(connection)
+                if data:
+                    data = self.deserializer.loads(data, headers.get("content-type"))
+                return data
+
+    async def close(self):
+        """
+        Explicitly closes connections
+        """
+        if self.sniffing_task:
+            try:
+                self.sniffing_task.cancel()
+                await self.sniffing_task
+            except asyncio.CancelledError:
+                pass
+            self.sniffing_task = None
+        for connection in self.connection_pool.connections:
+            await connection.close()
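+
+
+# Illustrative usage sketch (not part of the original module): driving
+# AsyncTransport through the high-level AsyncElasticsearch client. The local
+# host below is hypothetical.
+#
+#     import asyncio
+#     from elasticsearch_7 import AsyncElasticsearch
+#
+#     async def main():
+#         es = AsyncElasticsearch(["http://localhost:9200"])
+#         try:
+#             info = await es.info()   # first call triggers _async_init()
+#             print(info["cluster_name"])
+#         finally:
+#             await es.close()         # cancels sniffing, closes connections
+#
+#     asyncio.run(main())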
diff --git a/elasticsearch_7/client/__init__.py b/elasticsearch_7/client/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..31073054ed40d11679844f15eeb256d76ced2646
--- /dev/null
+++ b/elasticsearch_7/client/__init__.py
@@ -0,0 +1,1999 @@
+# -*- coding: utf-8 -*-
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from __future__ import unicode_literals
+import logging
+
+from ..transport import Transport, TransportError
+from .indices import IndicesClient
+from .ingest import IngestClient
+from .cluster import ClusterClient
+from .cat import CatClient
+from .nodes import NodesClient
+from .remote import RemoteClient
+from .snapshot import SnapshotClient
+from .tasks import TasksClient
+from .xpack import XPackClient
+from .utils import query_params, _make_path, SKIP_IN_PATH, _bulk_body, _normalize_hosts
+
+# xpack APIs
+from .async_search import AsyncSearchClient
+from .autoscaling import AutoscalingClient
+from .ccr import CcrClient
+from .data_frame import Data_FrameClient
+from .deprecation import DeprecationClient
+from .eql import EqlClient
+from .graph import GraphClient
+from .ilm import IlmClient
+from .license import LicenseClient
+from .migration import MigrationClient
+from .ml import MlClient
+from .monitoring import MonitoringClient
+from .rollup import RollupClient
+from .security import SecurityClient
+from .sql import SqlClient
+from .ssl import SslClient
+from .watcher import WatcherClient
+from .enrich import EnrichClient
+from .searchable_snapshots import SearchableSnapshotsClient
+from .slm import SlmClient
+from .transform import TransformClient
+
+
+logger = logging.getLogger("elasticsearch")
+
+
+class Elasticsearch(object):
+    """
+    Elasticsearch low-level client. Provides a straightforward mapping from
+    Python to ES REST endpoints.
+
+    The instance has attributes ``cat``, ``cluster``, ``indices``, ``ingest``,
+    ``nodes``, ``snapshot`` and ``tasks`` that provide access to instances of
+    :class:`~elasticsearch.client.CatClient`,
+    :class:`~elasticsearch.client.ClusterClient`,
+    :class:`~elasticsearch.client.IndicesClient`,
+    :class:`~elasticsearch.client.IngestClient`,
+    :class:`~elasticsearch.client.NodesClient`,
+    :class:`~elasticsearch.client.SnapshotClient` and
+    :class:`~elasticsearch.client.TasksClient` respectively. This is the
+    preferred (and only supported) way to get access to those classes and their
+    methods.
+
+    You can specify your own connection class which should be used by providing
+    the ``connection_class`` parameter::
+
+        # create connection to localhost using the ThriftConnection
+        es = Elasticsearch(connection_class=ThriftConnection)
+
+    If you want to turn on :ref:`sniffing` you have several options (described
+    in :class:`~elasticsearch.Transport`)::
+
+        # create connection that will automatically inspect the cluster to get
+        # the list of active nodes. Start with nodes running on 'esnode1' and
+        # 'esnode2'
+        es = Elasticsearch(
+            ['esnode1', 'esnode2'],
+            # sniff before doing anything
+            sniff_on_start=True,
+            # refresh nodes after a node fails to respond
+            sniff_on_connection_fail=True,
+            # and also every 60 seconds
+            sniffer_timeout=60
+        )
+
+    Different hosts can have different parameters, use a dictionary per node to
+    specify those::
+
+        # connect to localhost directly and another node using SSL on port 443
+        # and an url_prefix. Note that ``port`` needs to be an int.
+        es = Elasticsearch([
+            {'host': 'localhost'},
+            {'host': 'othernode', 'port': 443, 'url_prefix': 'es', 'use_ssl': True},
+        ])
+
+    If using SSL, there are several parameters that control how we deal with
+    certificates (see :class:`~elasticsearch.Urllib3HttpConnection` for
+    detailed description of the options)::
+
+        es = Elasticsearch(
+            ['localhost:443', 'other_host:443'],
+            # turn on SSL
+            use_ssl=True,
+            # make sure we verify SSL certificates
+            verify_certs=True,
+            # provide a path to CA certs on disk
+            ca_certs='/path/to/CA_certs'
+        )
+
+    If using SSL without verifying the certs, a warning message is shown by
+    default; it can be suppressed (see
+    :class:`~elasticsearch.Urllib3HttpConnection` for a detailed description
+    of the options)::
+
+        es = Elasticsearch(
+            ['localhost:443', 'other_host:443'],
+            # turn on SSL
+            use_ssl=True,
+            # no verify SSL certificates
+            verify_certs=False,
+            # don't show warnings about ssl certs verification
+            ssl_show_warn=False
+        )
+
+    SSL client authentication is supported
+    (see :class:`~elasticsearch.Urllib3HttpConnection` for
+    detailed description of the options)::
+
+        es = Elasticsearch(
+            ['localhost:443', 'other_host:443'],
+            # turn on SSL
+            use_ssl=True,
+            # make sure we verify SSL certificates
+            verify_certs=True,
+            # provide a path to CA certs on disk
+            ca_certs='/path/to/CA_certs',
+            # PEM formatted SSL client certificate
+            client_cert='/path/to/clientcert.pem',
+            # PEM formatted SSL client key
+            client_key='/path/to/clientkey.pem'
+        )
+
+    Alternatively you can use RFC-1738 formatted URLs, as long as they are not
+    in conflict with other options::
+
+        es = Elasticsearch(
+            [
+                'http://user:secret@localhost:9200/',
+                'https://user:secret@other_host:443/production'
+            ],
+            verify_certs=True
+        )
+
+    By default, `JSONSerializer
+    <https://github.com/elastic/elasticsearch-py/blob/master/elasticsearch/serializer.py#L24>`_
+    is used to encode all outgoing requests.
+    However, you can implement your own custom serializer::
+
+        from elasticsearch.serializer import JSONSerializer
+
+        class SetEncoder(JSONSerializer):
+            def default(self, obj):
+                if isinstance(obj, set):
+                    return list(obj)
+                if isinstance(obj, Something):
+                    return 'CustomSomethingRepresentation'
+                return JSONSerializer.default(self, obj)
+
+        es = Elasticsearch(serializer=SetEncoder())
+
+    """
+
+    def __init__(self, hosts=None, transport_class=Transport, **kwargs):
+        """
+        :arg hosts: list of nodes, or a single node, we should connect to.
+            Node should be a dictionary ({"host": "localhost", "port": 9200}),
+            the entire dictionary will be passed to the :class:`~elasticsearch.Connection`
+            class as kwargs, or a string in the format of ``host[:port]`` which will be
+            translated to a dictionary automatically.  If no value is given the
+            :class:`~elasticsearch.Connection` class defaults will be used.
+
+        :arg transport_class: :class:`~elasticsearch.Transport` subclass to use.
+
+        :arg kwargs: any additional arguments will be passed on to the
+            :class:`~elasticsearch.Transport` class and, subsequently, to the
+            :class:`~elasticsearch.Connection` instances.
+        """
+        self.transport = transport_class(_normalize_hosts(hosts), **kwargs)
+
+        # namespaced clients for compatibility with API names
+        self.indices = IndicesClient(self)
+        self.ingest = IngestClient(self)
+        self.cluster = ClusterClient(self)
+        self.cat = CatClient(self)
+        self.nodes = NodesClient(self)
+        self.remote = RemoteClient(self)
+        self.snapshot = SnapshotClient(self)
+        self.tasks = TasksClient(self)
+
+        self.xpack = XPackClient(self)
+        self.async_search = AsyncSearchClient(self)
+        self.autoscaling = AutoscalingClient(self)
+        self.ccr = CcrClient(self)
+        self.data_frame = Data_FrameClient(self)
+        self.deprecation = DeprecationClient(self)
+        self.eql = EqlClient(self)
+        self.graph = GraphClient(self)
+        self.ilm = IlmClient(self)
+        self.indices = IndicesClient(self)
+        self.license = LicenseClient(self)
+        self.migration = MigrationClient(self)
+        self.ml = MlClient(self)
+        self.monitoring = MonitoringClient(self)
+        self.rollup = RollupClient(self)
+        self.security = SecurityClient(self)
+        self.sql = SqlClient(self)
+        self.ssl = SslClient(self)
+        self.watcher = WatcherClient(self)
+        self.enrich = EnrichClient(self)
+        self.searchable_snapshots = SearchableSnapshotsClient(self)
+        self.slm = SlmClient(self)
+        self.transform = TransformClient(self)
+
+    def __repr__(self):
+        try:
+            # get a list of all connections
+            cons = self.transport.hosts
+            # truncate to 5 if there are too many
+            if len(cons) > 5:
+                cons = cons[:5] + ["..."]
+            return "<{cls}({cons})>".format(cls=self.__class__.__name__, cons=cons)
+        except Exception:
+            # probably operating on custom transport and connection_pool, ignore
+            return super(Elasticsearch, self).__repr__()
+
+    def __enter__(self):
+        if hasattr(self.transport, "_async_call"):
+            self.transport._async_call()
+        return self
+
+    def __exit__(self, *_):
+        self.close()
+
+    def close(self):
+        """Closes the Transport and all internal connections"""
+        self.transport.close()
+
+    # AUTO-GENERATED-API-DEFINITIONS #
+    @query_params()
+    def ping(self, params=None, headers=None):
+        """
+        Returns whether the cluster is running.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/index.html>`_
+        """
+        try:
+            return self.transport.perform_request(
+                "HEAD", "/", params=params, headers=headers
+            )
+        except TransportError:
+            return False
+
+    @query_params()
+    def info(self, params=None, headers=None):
+        """
+        Returns basic information about the cluster.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/index.html>`_
+        """
+        return self.transport.perform_request(
+            "GET", "/", params=params, headers=headers
+        )
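+
+    # Illustrative usage (not part of the original module; the local node is
+    # hypothetical):
+    #
+    #     es = Elasticsearch(["http://localhost:9200"])
+    #     if es.ping():                          # HEAD / -> True or False
+    #         print(es.info()["version"]["number"])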
+
+    @query_params(
+        "pipeline",
+        "refresh",
+        "routing",
+        "timeout",
+        "version",
+        "version_type",
+        "wait_for_active_shards",
+    )
+    def create(self, index, id, body, doc_type=None, params=None, headers=None):
+        """
+        Creates a new document in the index.  Returns a 409 response when a document
+        with the same ID already exists in the index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-index_.html>`_
+
+        :arg index: The name of the index
+        :arg id: Document ID
+        :arg body: The document
+        :arg doc_type: The type of the document
+        :arg pipeline: The pipeline id to preprocess incoming documents
+            with
+        :arg refresh: If `true` then refresh the affected shards to make
+            this operation visible to search, if `wait_for` then wait for a refresh
+            to make this operation visible to search, if `false` (the default) then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        :arg routing: Specific routing value
+        :arg timeout: Explicit operation timeout
+        :arg version: Explicit version number for concurrency control
+        :arg version_type: Specific version type  Valid choices:
+            internal, external, external_gte
+        :arg wait_for_active_shards: Sets the number of shard copies
+            that must be active before proceeding with the index operation. Defaults
+            to 1, meaning the primary shard only. Set to `all` for all shard copies,
+            otherwise set to any non-negative value less than or equal to the total
+            number of copies for the shard (number of replicas + 1)
+        """
+        for param in (index, id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        if doc_type in SKIP_IN_PATH:
+            path = _make_path(index, "_create", id)
+        else:
+            path = _make_path(index, doc_type, id, "_create")
+
+        return self.transport.perform_request(
+            "PUT", path, params=params, headers=headers, body=body
+        )
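+
+    # Illustrative usage (not part of the original module; index name and
+    # documents are hypothetical). A second create() with the same ID raises
+    # ConflictError (HTTP 409), unlike index(), which would overwrite:
+    #
+    #     from elasticsearch_7 import Elasticsearch, ConflictError
+    #
+    #     es = Elasticsearch()
+    #     es.create(index="my-index", id="1", body={"title": "first"})
+    #     try:
+    #         es.create(index="my-index", id="1", body={"title": "second"})
+    #     except ConflictError:
+    #         pass  # document "1" already exists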
+
+    @query_params(
+        "if_primary_term",
+        "if_seq_no",
+        "op_type",
+        "pipeline",
+        "refresh",
+        "routing",
+        "timeout",
+        "version",
+        "version_type",
+        "wait_for_active_shards",
+    )
+    def index(self, index, body, doc_type=None, id=None, params=None, headers=None):
+        """
+        Creates or updates a document in an index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-index_.html>`_
+
+        :arg index: The name of the index
+        :arg body: The document
+        :arg doc_type: The type of the document
+        :arg id: Document ID
+        :arg if_primary_term: only perform the index operation if the
+            last operation that has changed the document has the specified primary
+            term
+        :arg if_seq_no: only perform the index operation if the last
+            operation that has changed the document has the specified sequence
+            number
+        :arg op_type: Explicit operation type. Defaults to `index` for
+            requests with an explicit document ID, and to `create` for requests
+            without an explicit document ID  Valid choices: index, create
+        :arg pipeline: The pipeline id to preprocess incoming documents
+            with
+        :arg refresh: If `true` then refresh the affected shards to make
+            this operation visible to search, if `wait_for` then wait for a refresh
+            to make this operation visible to search, if `false` (the default) then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        :arg routing: Specific routing value
+        :arg timeout: Explicit operation timeout
+        :arg version: Explicit version number for concurrency control
+        :arg version_type: Specific version type  Valid choices:
+            internal, external, external_gte
+        :arg wait_for_active_shards: Sets the number of shard copies
+            that must be active before proceeding with the index operation. Defaults
+            to 1, meaning the primary shard only. Set to `all` for all shard copies,
+            otherwise set to any non-negative value less than or equal to the total
+            number of copies for the shard (number of replicas + 1)
+        """
+        for param in (index, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        if doc_type is None:
+            doc_type = "_doc"
+
+        return self.transport.perform_request(
+            "POST" if id in SKIP_IN_PATH else "PUT",
+            _make_path(index, doc_type, id),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params(
+        "_source",
+        "_source_excludes",
+        "_source_includes",
+        "pipeline",
+        "refresh",
+        "routing",
+        "timeout",
+        "wait_for_active_shards",
+    )
+    def bulk(self, body, index=None, doc_type=None, params=None, headers=None):
+        """
+        Allows performing multiple index/update/delete operations in a single request.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-bulk.html>`_
+
+        :arg body: The operation definition and data (action-data
+            pairs), separated by newlines
+        :arg index: Default index for items which don't provide one
+        :arg doc_type: Default document type for items which don't
+            provide one
+        :arg _source: True or false to return the _source field or not,
+            or default list of fields to return, can be overridden on each sub-
+            request
+        :arg _source_excludes: Default list of fields to exclude from
+            the returned _source field, can be overridden on each sub-request
+        :arg _source_includes: Default list of fields to extract and
+            return from the _source field, can be overridden on each sub-request
+        :arg pipeline: The pipeline id to preprocess incoming documents
+            with
+        :arg refresh: If `true` then refresh the affected shards to make
+            this operation visible to search, if `wait_for` then wait for a refresh
+            to make this operation visible to search, if `false` (the default) then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        :arg routing: Specific routing value
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_active_shards: Sets the number of shard copies
+            that must be active before proceeding with the bulk operation. Defaults
+            to 1, meaning the primary shard only. Set to `all` for all shard copies,
+            otherwise set to any non-negative value less than or equal to the total
+            number of copies for the shard (number of replicas + 1)
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        body = _bulk_body(self.transport.serializer, body)
+        return self.transport.perform_request(
+            "POST",
+            _make_path(index, doc_type, "_bulk"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
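+
+    # Illustrative usage (not part of the original module; index and documents
+    # are hypothetical). The body is newline-delimited action/data pairs; a
+    # list of dicts is also accepted and serialized line by line:
+    #
+    #     actions = [
+    #         {"index": {"_index": "my-index", "_id": "1"}},
+    #         {"title": "first"},
+    #         {"index": {"_index": "my-index", "_id": "2"}},
+    #         {"title": "second"},
+    #     ]
+    #     resp = es.bulk(body=actions, refresh="wait_for")
+    #     assert not resp["errors"]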
+
+    @query_params()
+    def clear_scroll(self, body=None, scroll_id=None, params=None, headers=None):
+        """
+        Explicitly clears the search context for a scroll.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/search-request-body.html#_clear_scroll_api>`_
+
+        :arg body: A comma-separated list of scroll IDs to clear if none
+            was specified via the scroll_id parameter
+        :arg scroll_id: A comma-separated list of scroll IDs to clear
+        """
+        if scroll_id in SKIP_IN_PATH and body in SKIP_IN_PATH:
+            raise ValueError("You need to supply scroll_id or body.")
+        elif scroll_id and not body:
+            body = {"scroll_id": [scroll_id]}
+        elif scroll_id:
+            params["scroll_id"] = scroll_id
+
+        return self.transport.perform_request(
+            "DELETE", "/_search/scroll", params=params, headers=headers, body=body
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "analyze_wildcard",
+        "analyzer",
+        "default_operator",
+        "df",
+        "expand_wildcards",
+        "ignore_throttled",
+        "ignore_unavailable",
+        "lenient",
+        "min_score",
+        "preference",
+        "q",
+        "routing",
+        "terminate_after",
+    )
+    def count(self, body=None, index=None, doc_type=None, params=None, headers=None):
+        """
+        Returns number of documents matching a query.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/search-count.html>`_
+
+        :arg body: A query to restrict the results specified with the
+            Query DSL (optional)
+        :arg index: A comma-separated list of indices to restrict the
+            results
+        :arg doc_type: A comma-separated list of types to restrict the
+            results
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg analyze_wildcard: Specify whether wildcard and prefix
+            queries should be analyzed (default: false)
+        :arg analyzer: The analyzer to use for the query string
+        :arg default_operator: The default operator for query string
+            query (AND or OR)  Valid choices: AND, OR  Default: OR
+        :arg df: The field to use as default where no field prefix is
+            given in the query string
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg ignore_throttled: Whether specified concrete, expanded or
+            aliased indices should be ignored when throttled
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg lenient: Specify whether format-based query failures (such
+            as providing text to a numeric field) should be ignored
+        :arg min_score: Include only documents with a specific `_score`
+            value in the result
+        :arg preference: Specify the node or shard the operation should
+            be performed on (default: random)
+        :arg q: Query in the Lucene query string syntax
+        :arg routing: A comma-separated list of specific routing values
+        :arg terminate_after: The maximum count for each shard, upon
+            reaching which the query execution will terminate early
+        """
+        return self.transport.perform_request(
+            "POST",
+            _make_path(index, doc_type, "_count"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
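+
+    # Illustrative usage (not part of the original module; names are
+    # hypothetical):
+    #
+    #     resp = es.count(index="my-index",
+    #                     body={"query": {"term": {"status": "published"}}})
+    #     print(resp["count"])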
+
+    @query_params(
+        "if_primary_term",
+        "if_seq_no",
+        "refresh",
+        "routing",
+        "timeout",
+        "version",
+        "version_type",
+        "wait_for_active_shards",
+    )
+    def delete(self, index, id, doc_type=None, params=None, headers=None):
+        """
+        Removes a document from the index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-delete.html>`_
+
+        :arg index: The name of the index
+        :arg id: The document ID
+        :arg doc_type: The type of the document
+        :arg if_primary_term: only perform the delete operation if the
+            last operation that has changed the document has the specified primary
+            term
+        :arg if_seq_no: only perform the delete operation if the last
+            operation that has changed the document has the specified sequence
+            number
+        :arg refresh: If `true` then refresh the affected shards to make
+            this operation visible to search, if `wait_for` then wait for a refresh
+            to make this operation visible to search, if `false` (the default) then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        :arg routing: Specific routing value
+        :arg timeout: Explicit operation timeout
+        :arg version: Explicit version number for concurrency control
+        :arg version_type: Specific version type  Valid choices:
+            internal, external, external_gte, force
+        :arg wait_for_active_shards: Sets the number of shard copies
+            that must be active before proceeding with the delete operation.
+            Defaults to 1, meaning the primary shard only. Set to `all` for all
+            shard copies, otherwise set to any non-negative value less than or equal
+            to the total number of copies for the shard (number of replicas + 1)
+        """
+        for param in (index, id):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        if doc_type in SKIP_IN_PATH:
+            doc_type = "_doc"
+
+        return self.transport.perform_request(
+            "DELETE", _make_path(index, doc_type, id), params=params, headers=headers
+        )
+
+    @query_params(
+        "_source",
+        "_source_excludes",
+        "_source_includes",
+        "allow_no_indices",
+        "analyze_wildcard",
+        "analyzer",
+        "conflicts",
+        "default_operator",
+        "df",
+        "expand_wildcards",
+        "from_",
+        "ignore_unavailable",
+        "lenient",
+        "max_docs",
+        "preference",
+        "q",
+        "refresh",
+        "request_cache",
+        "requests_per_second",
+        "routing",
+        "scroll",
+        "scroll_size",
+        "search_timeout",
+        "search_type",
+        "size",
+        "slices",
+        "sort",
+        "stats",
+        "terminate_after",
+        "timeout",
+        "version",
+        "wait_for_active_shards",
+        "wait_for_completion",
+    )
+    def delete_by_query(self, index, body, doc_type=None, params=None, headers=None):
+        """
+        Deletes documents matching the provided query.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-delete-by-query.html>`_
+
+        :arg index: A comma-separated list of index names to search; use
+            `_all` or empty string to perform the operation on all indices
+        :arg body: The search definition using the Query DSL
+        :arg doc_type: A comma-separated list of document types to
+            search; leave empty to perform the operation on all types
+        :arg _source: True or false to return the _source field or not,
+            or a list of fields to return
+        :arg _source_excludes: A list of fields to exclude from the
+            returned _source field
+        :arg _source_includes: A list of fields to extract and return
+            from the _source field
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg analyze_wildcard: Specify whether wildcard and prefix
+            queries should be analyzed (default: false)
+        :arg analyzer: The analyzer to use for the query string
+        :arg conflicts: What to do when the delete by query hits version
+            conflicts?  Valid choices: abort, proceed  Default: abort
+        :arg default_operator: The default operator for query string
+            query (AND or OR)  Valid choices: AND, OR  Default: OR
+        :arg df: The field to use as default where no field prefix is
+            given in the query string
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg from_: Starting offset (default: 0)
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg lenient: Specify whether format-based query failures (such
+            as providing text to a numeric field) should be ignored
+        :arg max_docs: Maximum number of documents to process (default:
+            all documents)
+        :arg preference: Specify the node or shard the operation should
+            be performed on (default: random)
+        :arg q: Query in the Lucene query string syntax
+        :arg refresh: Should the affected indexes be refreshed?
+        :arg request_cache: Specify if request cache should be used for
+            this request or not, defaults to index level setting
+        :arg requests_per_second: The throttle for this request in sub-
+            requests per second. -1 means no throttle.
+        :arg routing: A comma-separated list of specific routing values
+        :arg scroll: Specify how long a consistent view of the index
+            should be maintained for scrolled search
+        :arg scroll_size: Size on the scroll request powering the delete
+            by query  Default: 100
+        :arg search_timeout: Explicit timeout for each search request.
+            Defaults to no timeout.
+        :arg search_type: Search operation type  Valid choices:
+            query_then_fetch, dfs_query_then_fetch
+        :arg size: Deprecated, please use `max_docs` instead
+        :arg slices: The number of slices this task should be divided
+            into. Defaults to 1, meaning the task isn't sliced into subtasks. Can be
+            set to `auto`.  Default: 1
+        :arg sort: A comma-separated list of <field>:<direction> pairs
+        :arg stats: Specific 'tag' of the request for logging and
+            statistical purposes
+        :arg terminate_after: The maximum number of documents to collect
+            for each shard, upon reaching which the query execution will terminate
+            early.
+        :arg timeout: Time each individual bulk request should wait for
+            shards that are unavailable.  Default: 1m
+        :arg version: Specify whether to return document version as part
+            of a hit
+        :arg wait_for_active_shards: Sets the number of shard copies
+            that must be active before proceeding with the delete by query
+            operation. Defaults to 1, meaning the primary shard only. Set to `all`
+            for all shard copies, otherwise set to any non-negative value less than
+            or equal to the total number of copies for the shard (number of replicas
+            + 1)
+        :arg wait_for_completion: Should the request block until
+            the delete by query is complete.  Default: True
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        for param in (index, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path(index, doc_type, "_delete_by_query"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
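+
+    # Illustrative usage (not part of the original module; names are
+    # hypothetical). conflicts="proceed" continues past version conflicts
+    # instead of aborting:
+    #
+    #     resp = es.delete_by_query(
+    #         index="my-index",
+    #         body={"query": {"match": {"user": "kimchy"}}},
+    #         conflicts="proceed",
+    #     )
+    #     print(resp["deleted"])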
+
+    @query_params("requests_per_second")
+    def delete_by_query_rethrottle(self, task_id, params=None, headers=None):
+        """
+        Changes the number of requests per second for a particular Delete By Query
+        operation.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-delete-by-query.html>`_
+
+        :arg task_id: The task id to rethrottle
+        :arg requests_per_second: The throttle to set on this request in
+            floating sub-requests per second. -1 means set no throttle.
+        """
+        if task_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'task_id'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_delete_by_query", task_id, "_rethrottle"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("master_timeout", "timeout")
+    def delete_script(self, id, params=None, headers=None):
+        """
+        Deletes a script.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/modules-scripting.html>`_
+
+        :arg id: Script ID
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'id'.")
+
+        return self.transport.perform_request(
+            "DELETE", _make_path("_scripts", id), params=params, headers=headers
+        )
+
+    @query_params(
+        "_source",
+        "_source_excludes",
+        "_source_includes",
+        "preference",
+        "realtime",
+        "refresh",
+        "routing",
+        "stored_fields",
+        "version",
+        "version_type",
+    )
+    def exists(self, index, id, doc_type=None, params=None, headers=None):
+        """
+        Returns information about whether a document exists in an index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-get.html>`_
+
+        :arg index: The name of the index
+        :arg id: The document ID
+        :arg doc_type: The type of the document (use `_all` to fetch the
+            first document matching the ID across all types)
+        :arg _source: True or false to return the _source field or not,
+            or a list of fields to return
+        :arg _source_excludes: A list of fields to exclude from the
+            returned _source field
+        :arg _source_includes: A list of fields to extract and return
+            from the _source field
+        :arg preference: Specify the node or shard the operation should
+            be performed on (default: random)
+        :arg realtime: Specify whether to perform the operation in
+            realtime or search mode
+        :arg refresh: Refresh the shard containing the document before
+            performing the operation
+        :arg routing: Specific routing value
+        :arg stored_fields: A comma-separated list of stored fields to
+            return in the response
+        :arg version: Explicit version number for concurrency control
+        :arg version_type: Specific version type  Valid choices:
+            internal, external, external_gte, force
+        """
+        for param in (index, id):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        if doc_type in SKIP_IN_PATH:
+            doc_type = "_doc"
+
+        return self.transport.perform_request(
+            "HEAD", _make_path(index, doc_type, id), params=params, headers=headers
+        )
+
+    @query_params(
+        "_source",
+        "_source_excludes",
+        "_source_includes",
+        "preference",
+        "realtime",
+        "refresh",
+        "routing",
+        "version",
+        "version_type",
+    )
+    def exists_source(self, index, id, doc_type=None, params=None, headers=None):
+        """
+        Returns information about whether a document source exists in an index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-get.html>`_
+
+        :arg index: The name of the index
+        :arg id: The document ID
+        :arg doc_type: The type of the document; deprecated and optional
+            starting with 7.0
+        :arg _source: True or false to return the _source field or not,
+            or a list of fields to return
+        :arg _source_excludes: A list of fields to exclude from the
+            returned _source field
+        :arg _source_includes: A list of fields to extract and return
+            from the _source field
+        :arg preference: Specify the node or shard the operation should
+            be performed on (default: random)
+        :arg realtime: Specify whether to perform the operation in
+            realtime or search mode
+        :arg refresh: Refresh the shard containing the document before
+            performing the operation
+        :arg routing: Specific routing value
+        :arg version: Explicit version number for concurrency control
+        :arg version_type: Specific version type  Valid choices:
+            internal, external, external_gte, force
+        """
+        for param in (index, id):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        if doc_type in SKIP_IN_PATH:
+            path = _make_path(index, "_source", id)
+        else:
+            path = _make_path(index, doc_type, id, "_source")
+
+        return self.transport.perform_request(
+            "HEAD", path, params=params, headers=headers
+        )
+
+    @query_params(
+        "_source",
+        "_source_excludes",
+        "_source_includes",
+        "analyze_wildcard",
+        "analyzer",
+        "default_operator",
+        "df",
+        "lenient",
+        "preference",
+        "q",
+        "routing",
+        "stored_fields",
+    )
+    def explain(self, index, id, body=None, doc_type=None, params=None, headers=None):
+        """
+        Returns information about why a specific document matches (or doesn't
+        match) a query.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/search-explain.html>`_
+
+        :arg index: The name of the index
+        :arg id: The document ID
+        :arg body: The query definition using the Query DSL
+        :arg doc_type: The type of the document
+        :arg _source: True or false to return the _source field or not,
+            or a list of fields to return
+        :arg _source_excludes: A list of fields to exclude from the
+            returned _source field
+        :arg _source_includes: A list of fields to extract and return
+            from the _source field
+        :arg analyze_wildcard: Specify whether wildcards and prefix
+            queries in the query string query should be analyzed (default: false)
+        :arg analyzer: The analyzer for the query string query
+        :arg default_operator: The default operator for query string
+            query (AND or OR)  Valid choices: AND, OR  Default: OR
+        :arg df: The default field for query string query (default:
+            _all)
+        :arg lenient: Specify whether format-based query failures (such
+            as providing text to a numeric field) should be ignored
+        :arg preference: Specify the node or shard the operation should
+            be performed on (default: random)
+        :arg q: Query in the Lucene query string syntax
+        :arg routing: Specific routing value
+        :arg stored_fields: A comma-separated list of stored fields to
+            return in the response
+        """
+        for param in (index, id):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        if doc_type in SKIP_IN_PATH:
+            path = _make_path(index, "_explain", id)
+        else:
+            path = _make_path(index, doc_type, id, "_explain")
+
+        return self.transport.perform_request(
+            "POST", path, params=params, headers=headers, body=body
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "fields",
+        "ignore_unavailable",
+        "include_unmapped",
+    )
+    def field_caps(self, index=None, params=None, headers=None):
+        """
+        Returns the information about the capabilities of fields among multiple
+        indices.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/search-field-caps.html>`_
+
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string to perform the operation on all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg fields: A comma-separated list of field names
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg include_unmapped: Indicates whether unmapped fields should
+            be included in the response.
+        """
+        return self.transport.perform_request(
+            "GET", _make_path(index, "_field_caps"), params=params, headers=headers
+        )
+
+    @query_params(
+        "_source",
+        "_source_excludes",
+        "_source_includes",
+        "preference",
+        "realtime",
+        "refresh",
+        "routing",
+        "stored_fields",
+        "version",
+        "version_type",
+    )
+    def get(self, index, id, doc_type=None, params=None, headers=None):
+        """
+        Returns a document.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-get.html>`_
+
+        :arg index: The name of the index
+        :arg id: The document ID
+        :arg doc_type: The type of the document (use `_all` to fetch the
+            first document matching the ID across all types)
+        :arg _source: True or false to return the _source field or not,
+            or a list of fields to return
+        :arg _source_excludes: A list of fields to exclude from the
+            returned _source field
+        :arg _source_includes: A list of fields to extract and return
+            from the _source field
+        :arg preference: Specify the node or shard the operation should
+            be performed on (default: random)
+        :arg realtime: Specify whether to perform the operation in
+            realtime or search mode
+        :arg refresh: Refresh the shard containing the document before
+            performing the operation
+        :arg routing: Specific routing value
+        :arg stored_fields: A comma-separated list of stored fields to
+            return in the response
+        :arg version: Explicit version number for concurrency control
+        :arg version_type: Specific version type  Valid choices:
+            internal, external, external_gte, force
+        """
+        for param in (index, id):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        if doc_type in SKIP_IN_PATH:
+            doc_type = "_doc"
+
+        return self.transport.perform_request(
+            "GET", _make_path(index, doc_type, id), params=params, headers=headers
+        )
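+
+    # Illustrative usage (not part of the original module; names are
+    # hypothetical):
+    #
+    #     doc = es.get(index="my-index", id="1")
+    #     print(doc["_source"])                      # the stored document
+    #     es.get(index="my-index", id="1", _source_includes="title")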
+
+    @query_params("master_timeout")
+    def get_script(self, id, params=None, headers=None):
+        """
+        Returns a script.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/modules-scripting.html>`_
+
+        :arg id: Script ID
+        :arg master_timeout: Specify timeout for connection to master
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'id'.")
+
+        return self.transport.perform_request(
+            "GET", _make_path("_scripts", id), params=params, headers=headers
+        )
+
+    @query_params(
+        "_source",
+        "_source_excludes",
+        "_source_includes",
+        "preference",
+        "realtime",
+        "refresh",
+        "routing",
+        "version",
+        "version_type",
+    )
+    def get_source(self, index, id, doc_type=None, params=None, headers=None):
+        """
+        Returns the source of a document.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-get.html>`_
+
+        :arg index: The name of the index
+        :arg id: The document ID
+        :arg doc_type: The type of the document; deprecated and optional
+            starting with 7.0
+        :arg _source: True or false to return the _source field or not,
+            or a list of fields to return
+        :arg _source_excludes: A list of fields to exclude from the
+            returned _source field
+        :arg _source_includes: A list of fields to extract and return
+            from the _source field
+        :arg preference: Specify the node or shard the operation should
+            be performed on (default: random)
+        :arg realtime: Specify whether to perform the operation in
+            realtime or search mode
+        :arg refresh: Refresh the shard containing the document before
+            performing the operation
+        :arg routing: Specific routing value
+        :arg version: Explicit version number for concurrency control
+        :arg version_type: Specific version type  Valid choices:
+            internal, external, external_gte, force
+        """
+        for param in (index, id):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        if doc_type in SKIP_IN_PATH:
+            path = _make_path(index, "_source", id)
+        else:
+            path = _make_path(index, doc_type, id, "_source")
+
+        return self.transport.perform_request(
+            "GET", path, params=params, headers=headers
+        )
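+
+    # Illustrative usage sketch (assumes a connected client ``es``; index and id
+    # are placeholders). The response is the document body itself, without the
+    # usual ``_index``/``_id`` metadata wrapper:
+    #
+    #   source = es.get_source(index="my-index", id="1", _source_includes=["title"])
+    #   print(source.get("title"))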
+
+    @query_params(
+        "_source",
+        "_source_excludes",
+        "_source_includes",
+        "preference",
+        "realtime",
+        "refresh",
+        "routing",
+        "stored_fields",
+    )
+    def mget(self, body, index=None, doc_type=None, params=None, headers=None):
+        """
+        Allows to get multiple documents in one request.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-multi-get.html>`_
+
+        :arg body: Document identifiers; can be either `docs`
+            (containing full document information) or `ids` (when index and type are
+            provided in the URL).
+        :arg index: The name of the index
+        :arg doc_type: The type of the document
+        :arg _source: True or false to return the _source field or not,
+            or a list of fields to return
+        :arg _source_excludes: A list of fields to exclude from the
+            returned _source field
+        :arg _source_includes: A list of fields to extract and return
+            from the _source field
+        :arg preference: Specify the node or shard the operation should
+            be performed on (default: random)
+        :arg realtime: Specify whether to perform the operation in
+            realtime or search mode
+        :arg refresh: Refresh the shard containing the document before
+            performing the operation
+        :arg routing: Specific routing value
+        :arg stored_fields: A comma-separated list of stored fields to
+            return in the response
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path(index, doc_type, "_mget"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
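+
+    # Illustrative usage sketch (assumes a connected client ``es``; index and ids
+    # are placeholders). With ``index`` given in the call, ``body`` may list ids only:
+    #
+    #   resp = es.mget(index="my-index", body={"ids": ["1", "2"]})
+    #   found = [d["_source"] for d in resp["docs"] if d.get("found")]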
+
+    @query_params(
+        "ccs_minimize_roundtrips",
+        "max_concurrent_searches",
+        "max_concurrent_shard_requests",
+        "pre_filter_shard_size",
+        "rest_total_hits_as_int",
+        "search_type",
+        "typed_keys",
+    )
+    def msearch(self, body, index=None, doc_type=None, params=None, headers=None):
+        """
+        Allows to execute several search operations in one request.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/search-multi-search.html>`_
+
+        :arg body: The request definitions (metadata-search request
+            definition pairs), separated by newlines
+        :arg index: A comma-separated list of index names to use as
+            default
+        :arg doc_type: A comma-separated list of document types to use
+            as default
+        :arg ccs_minimize_roundtrips: Indicates whether network round-
+            trips should be minimized as part of cross-cluster search requests
+            execution  Default: true
+        :arg max_concurrent_searches: Controls the maximum number of
+            concurrent searches the multi search api will execute
+        :arg max_concurrent_shard_requests: The number of concurrent
+            shard requests each sub search executes concurrently per node. This
+            value should be used to limit the impact of the search on the cluster in
+            order to limit the number of concurrent shard requests  Default: 5
+        :arg pre_filter_shard_size: A threshold that enforces a pre-
+            filter roundtrip to prefilter search shards based on query rewriting if
+            the number of shards the search request expands to exceeds the
+            threshold. This filter roundtrip can limit the number of shards
+            significantly if for instance a shard can not match any documents based
+            on its rewrite method ie. if date filters are mandatory to match but the
+            shard bounds and the query are disjoint.
+        :arg rest_total_hits_as_int: Indicates whether hits.total should
+            be rendered as an integer or an object in the rest search response
+        :arg search_type: Search operation type  Valid choices:
+            query_then_fetch, query_and_fetch, dfs_query_then_fetch,
+            dfs_query_and_fetch
+        :arg typed_keys: Specify whether aggregation and suggester names
+            should be prefixed by their respective types in the response
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        body = _bulk_body(self.transport.serializer, body)
+        return self.transport.perform_request(
+            "POST",
+            _make_path(index, doc_type, "_msearch"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
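+
+    # Illustrative usage sketch (assumes a connected client ``es``; index name and
+    # query are placeholders). ``body`` is newline-delimited header/body pairs; a
+    # list of dicts is also accepted and serialized by ``_bulk_body`` above:
+    #
+    #   resp = es.msearch(body=[
+    #       {"index": "my-index"},
+    #       {"query": {"match_all": {}}, "size": 1},
+    #   ])
+    #   hits = resp["responses"][0]["hits"]["hits"]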
+
+    @query_params("master_timeout", "timeout")
+    def put_script(self, id, body, context=None, params=None, headers=None):
+        """
+        Creates or updates a script.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/modules-scripting.html>`_
+
+        :arg id: Script ID
+        :arg body: The document
+        :arg context: Context name to compile script against
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        """
+        for param in (id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_scripts", id, context),
+            params=params,
+            headers=headers,
+            body=body,
+        )
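+
+    # Illustrative usage sketch (assumes a connected client ``es``; the id and
+    # script source are placeholders):
+    #
+    #   es.put_script(id="calc-score", body={
+    #       "script": {"lang": "painless", "source": "doc['rank'].value * params.f"}
+    #   })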
+
+    @query_params(
+        "allow_no_indices", "expand_wildcards", "ignore_unavailable", "search_type"
+    )
+    def rank_eval(self, body, index=None, params=None, headers=None):
+        """
+        Allows to evaluate the quality of ranked search results over a set of typical
+        search queries
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/search-rank-eval.html>`_
+
+        :arg body: The ranking evaluation search definition, including
+            search requests, document ratings and ranking metric definition.
+        :arg index: A comma-separated list of index names to search; use
+            `_all` or empty string to perform the operation on all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg search_type: Search operation type  Valid choices:
+            query_then_fetch, dfs_query_then_fetch
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path(index, "_rank_eval"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
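+
+    # Illustrative usage sketch (assumes a connected client ``es``; index, query
+    # and ratings are placeholders):
+    #
+    #   resp = es.rank_eval(index="my-index", body={
+    #       "requests": [{
+    #           "id": "q1",
+    #           "request": {"query": {"match": {"title": "moon"}}},
+    #           "ratings": [{"_index": "my-index", "_id": "1", "rating": 1}],
+    #       }],
+    #       "metric": {"precision": {"k": 10}},
+    #   })
+    #   print(resp["metric_score"])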
+
+    @query_params(
+        "max_docs",
+        "refresh",
+        "requests_per_second",
+        "scroll",
+        "slices",
+        "timeout",
+        "wait_for_active_shards",
+        "wait_for_completion",
+    )
+    def reindex(self, body, params=None, headers=None):
+        """
+        Allows to copy documents from one index to another, optionally filtering the
+        source documents by a query, changing the destination index settings, or
+        fetching the documents from a remote cluster.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-reindex.html>`_
+
+        :arg body: The search definition using the Query DSL and the
+            prototype for the index request.
+        :arg max_docs: Maximum number of documents to process (default:
+            all documents)
+        :arg refresh: Should the affected indexes be refreshed?
+        :arg requests_per_second: The throttle to set on this request in
+            sub-requests per second. -1 means no throttle.
+        :arg scroll: Control how long to keep the search context alive
+            Default: 5m
+        :arg slices: The number of slices this task should be divided
+            into. Defaults to 1, meaning the task isn't sliced into subtasks. Can be
+            set to `auto`.  Default: 1
+        :arg timeout: Time each individual bulk request should wait for
+            shards that are unavailable.  Default: 1m
+        :arg wait_for_active_shards: Sets the number of shard copies
+            that must be active before proceeding with the reindex operation.
+            Defaults to 1, meaning the primary shard only. Set to `all` for all
+            shard copies, otherwise set to any non-negative value less than or equal
+            to the total number of copies for the shard (number of replicas + 1)
+        :arg wait_for_completion: Should the request block until the
+            reindex is complete.  Default: True
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return self.transport.perform_request(
+            "POST", "/_reindex", params=params, headers=headers, body=body
+        )
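+
+    # Illustrative usage sketch (assumes a connected client ``es``; source and
+    # destination index names are placeholders):
+    #
+    #   es.reindex(body={
+    #       "source": {"index": "old-index"},
+    #       "dest": {"index": "new-index"},
+    #   }, wait_for_completion=False)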
+
+    @query_params("requests_per_second")
+    def reindex_rethrottle(self, task_id, params=None, headers=None):
+        """
+        Changes the number of requests per second for a particular Reindex operation.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-reindex.html>`_
+
+        :arg task_id: The task id to rethrottle
+        :arg requests_per_second: The throttle to set on this request in
+            sub-requests per second, as a floating point value. -1 means no throttle.
+        """
+        if task_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'task_id'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_reindex", task_id, "_rethrottle"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def render_search_template(self, body=None, id=None, params=None, headers=None):
+        """
+        Allows to use the Mustache language to pre-render a search definition.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/search-template.html#_validating_templates>`_
+
+        :arg body: The search definition template and its params
+        :arg id: The id of the stored search template
+        """
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_render", "template", id),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def scripts_painless_execute(self, body=None, params=None, headers=None):
+        """
+        Allows an arbitrary script to be executed and a result to be returned
+        `<https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-execute-api.html>`_
+
+        :arg body: The script to execute
+        """
+        return self.transport.perform_request(
+            "POST",
+            "/_scripts/painless/_execute",
+            params=params,
+            headers=headers,
+            body=body,
+        )
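+
+    # Illustrative usage sketch (assumes a connected client ``es``; the script and
+    # its params are placeholders):
+    #
+    #   resp = es.scripts_painless_execute(body={
+    #       "script": {"source": "params.count / params.total",
+    #                  "params": {"count": 100.0, "total": 1000.0}},
+    #   })
+    #   print(resp["result"])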
+
+    @query_params("rest_total_hits_as_int", "scroll")
+    def scroll(self, body=None, scroll_id=None, params=None, headers=None):
+        """
+        Allows to retrieve a large number of results from a single search request.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/search-request-body.html#request-body-search-scroll>`_
+
+        :arg body: The scroll ID if not passed by URL or query
+            parameter.
+        :arg scroll_id: The scroll ID for scrolled search
+        :arg rest_total_hits_as_int: Indicates whether hits.total should
+            be rendered as an integer or an object in the rest search response
+        :arg scroll: Specify how long a consistent view of the index
+            should be maintained for scrolled search
+        """
+        if scroll_id in SKIP_IN_PATH and body in SKIP_IN_PATH:
+            raise ValueError("You need to supply scroll_id or body.")
+        elif scroll_id and not body:
+            body = {"scroll_id": scroll_id}
+        elif scroll_id:
+            params["scroll_id"] = scroll_id
+
+        return self.transport.perform_request(
+            "POST", "/_search/scroll", params=params, headers=headers, body=body
+        )
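+
+    # Illustrative usage sketch (assumes a connected client ``es``; the index name
+    # is a placeholder and ``process`` stands in for real result handling). An
+    # initial search opens the scroll, then ``scroll`` pages through the rest:
+    #
+    #   page = es.search(index="my-index", scroll="2m", size=100,
+    #                    body={"query": {"match_all": {}}})
+    #   while page["hits"]["hits"]:
+    #       process(page["hits"]["hits"])
+    #       page = es.scroll(body={"scroll_id": page["_scroll_id"], "scroll": "2m"})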
+
+    @query_params(
+        "_source",
+        "_source_excludes",
+        "_source_includes",
+        "allow_no_indices",
+        "allow_partial_search_results",
+        "analyze_wildcard",
+        "analyzer",
+        "batched_reduce_size",
+        "ccs_minimize_roundtrips",
+        "default_operator",
+        "df",
+        "docvalue_fields",
+        "expand_wildcards",
+        "explain",
+        "from_",
+        "ignore_throttled",
+        "ignore_unavailable",
+        "lenient",
+        "max_concurrent_shard_requests",
+        "pre_filter_shard_size",
+        "preference",
+        "q",
+        "request_cache",
+        "rest_total_hits_as_int",
+        "routing",
+        "scroll",
+        "search_type",
+        "seq_no_primary_term",
+        "size",
+        "sort",
+        "stats",
+        "stored_fields",
+        "suggest_field",
+        "suggest_mode",
+        "suggest_size",
+        "suggest_text",
+        "terminate_after",
+        "timeout",
+        "track_scores",
+        "track_total_hits",
+        "typed_keys",
+        "version",
+    )
+    def search(self, body=None, index=None, doc_type=None, params=None, headers=None):
+        """
+        Returns results matching a query.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/search-search.html>`_
+
+        :arg body: The search definition using the Query DSL
+        :arg index: A comma-separated list of index names to search; use
+            `_all` or empty string to perform the operation on all indices
+        :arg doc_type: A comma-separated list of document types to
+            search; leave empty to perform the operation on all types
+        :arg _source: True or false to return the _source field or not,
+            or a list of fields to return
+        :arg _source_excludes: A list of fields to exclude from the
+            returned _source field
+        :arg _source_includes: A list of fields to extract and return
+            from the _source field
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg allow_partial_search_results: Indicate if an error should
+            be returned if there is a partial search failure or timeout  Default:
+            True
+        :arg analyze_wildcard: Specify whether wildcard and prefix
+            queries should be analyzed (default: false)
+        :arg analyzer: The analyzer to use for the query string
+        :arg batched_reduce_size: The number of shard results that
+            should be reduced at once on the coordinating node. This value should be
+            used as a protection mechanism to reduce the memory overhead per search
+            request if the potential number of shards in the request can be large.
+            Default: 512
+        :arg ccs_minimize_roundtrips: Indicates whether network round-
+            trips should be minimized as part of cross-cluster search requests
+            execution  Default: true
+        :arg default_operator: The default operator for query string
+            query (AND or OR)  Valid choices: AND, OR  Default: OR
+        :arg df: The field to use as default where no field prefix is
+            given in the query string
+        :arg docvalue_fields: A comma-separated list of fields to return
+            as the docvalue representation of a field for each hit
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg explain: Specify whether to return detailed information
+            about score computation as part of a hit
+        :arg from_: Starting offset (default: 0)
+        :arg ignore_throttled: Whether specified concrete, expanded or
+            aliased indices should be ignored when throttled
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg lenient: Specify whether format-based query failures (such
+            as providing text to a numeric field) should be ignored
+        :arg max_concurrent_shard_requests: The number of concurrent
+            shard requests per node this search executes concurrently. This value
+            should be used to limit the impact of the search on the cluster in order
+            to limit the number of concurrent shard requests  Default: 5
+        :arg pre_filter_shard_size: A threshold that enforces a pre-
+            filter roundtrip to prefilter search shards based on query rewriting if
+            the number of shards the search request expands to exceeds the
+            threshold. This filter roundtrip can limit the number of shards
+            significantly if for instance a shard can not match any documents based
+            on its rewrite method ie. if date filters are mandatory to match but the
+            shard bounds and the query are disjoint.
+        :arg preference: Specify the node or shard the operation should
+            be performed on (default: random)
+        :arg q: Query in the Lucene query string syntax
+        :arg request_cache: Specify if request cache should be used for
+            this request or not, defaults to index level setting
+        :arg rest_total_hits_as_int: Indicates whether hits.total should
+            be rendered as an integer or an object in the rest search response
+        :arg routing: A comma-separated list of specific routing values
+        :arg scroll: Specify how long a consistent view of the index
+            should be maintained for scrolled search
+        :arg search_type: Search operation type  Valid choices:
+            query_then_fetch, dfs_query_then_fetch
+        :arg seq_no_primary_term: Specify whether to return sequence
+            number and primary term of the last modification of each hit
+        :arg size: Number of hits to return (default: 10)
+        :arg sort: A comma-separated list of <field>:<direction> pairs
+        :arg stats: Specific 'tag' of the request for logging and
+            statistical purposes
+        :arg stored_fields: A comma-separated list of stored fields to
+            return as part of a hit
+        :arg suggest_field: Specify which field to use for suggestions
+        :arg suggest_mode: Specify suggest mode  Valid choices: missing,
+            popular, always  Default: missing
+        :arg suggest_size: How many suggestions to return in response
+        :arg suggest_text: The source text for which the suggestions
+            should be returned
+        :arg terminate_after: The maximum number of documents to collect
+            for each shard, upon reaching which the query execution will terminate
+            early.
+        :arg timeout: Explicit operation timeout
+        :arg track_scores: Whether to calculate and return scores even
+            if they are not used for sorting
+        :arg track_total_hits: Indicate if the number of documents that
+            match the query should be tracked
+        :arg typed_keys: Specify whether aggregation and suggester names
+            should be prefixed by their respective types in the response
+        :arg version: Specify whether to return document version as part
+            of a hit
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path(index, doc_type, "_search"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
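+
+    # Illustrative usage sketch (assumes a connected client ``es``; index, query
+    # and field names are placeholders). ``from_`` maps to the ``from`` query
+    # parameter as handled above:
+    #
+    #   resp = es.search(index="my-index", from_=0, size=5,
+    #                    body={"query": {"match": {"title": "moon"}}})
+    #   for hit in resp["hits"]["hits"]:
+    #       print(hit["_id"], hit["_score"])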
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "ignore_unavailable",
+        "local",
+        "preference",
+        "routing",
+    )
+    def search_shards(self, index=None, params=None, headers=None):
+        """
+        Returns information about the indices and shards that a search request would be
+        executed against.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/search-shards.html>`_
+
+        :arg index: A comma-separated list of index names to search; use
+            `_all` or empty string to perform the operation on all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg preference: Specify the node or shard the operation should
+            be performed on (default: random)
+        :arg routing: Specific routing value
+        """
+        return self.transport.perform_request(
+            "GET", _make_path(index, "_search_shards"), params=params, headers=headers
+        )
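+
+    # Illustrative usage sketch (assumes a connected client ``es``; index and
+    # routing value are placeholders):
+    #
+    #   info = es.search_shards(index="my-index", routing="user-1")
+    #   print(len(info["shards"]))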
+
+    @query_params(
+        "_source",
+        "_source_excludes",
+        "_source_includes",
+        "if_primary_term",
+        "if_seq_no",
+        "lang",
+        "refresh",
+        "retry_on_conflict",
+        "routing",
+        "timeout",
+        "wait_for_active_shards",
+    )
+    def update(self, index, id, body, doc_type=None, params=None, headers=None):
+        """
+        Updates a document with a script or partial document.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-update.html>`_
+
+        :arg index: The name of the index
+        :arg id: Document ID
+        :arg body: The request definition requires either `script` or
+            partial `doc`
+        :arg doc_type: The type of the document
+        :arg _source: True or false to return the _source field or not,
+            or a list of fields to return
+        :arg _source_excludes: A list of fields to exclude from the
+            returned _source field
+        :arg _source_includes: A list of fields to extract and return
+            from the _source field
+        :arg if_primary_term: only perform the update operation if the
+            last operation that has changed the document has the specified primary
+            term
+        :arg if_seq_no: only perform the update operation if the last
+            operation that has changed the document has the specified sequence
+            number
+        :arg lang: The script language (default: painless)
+        :arg refresh: If `true` then refresh the affected shards to make
+            this operation visible to search, if `wait_for` then wait for a refresh
+            to make this operation visible to search, if `false` (the default) then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        :arg retry_on_conflict: Specify how many times should the
+            operation be retried when a conflict occurs (default: 0)
+        :arg routing: Specific routing value
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_active_shards: Sets the number of shard copies
+            that must be active before proceeding with the update operation.
+            Defaults to 1, meaning the primary shard only. Set to `all` for all
+            shard copies, otherwise set to any non-negative value less than or equal
+            to the total number of copies for the shard (number of replicas + 1)
+        """
+        for param in (index, id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        if doc_type in SKIP_IN_PATH:
+            path = _make_path(index, "_update", id)
+        else:
+            path = _make_path(index, doc_type, id, "_update")
+
+        return self.transport.perform_request(
+            "POST", path, params=params, headers=headers, body=body
+        )
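+
+    # Illustrative usage sketch (assumes a connected client ``es``; index, id and
+    # field names are placeholders). ``body`` carries either a partial ``doc`` or
+    # a ``script``:
+    #
+    #   es.update(index="my-index", id="1", body={"doc": {"views": 10}})
+    #   es.update(index="my-index", id="1", retry_on_conflict=3,
+    #             body={"script": {"source": "ctx._source.views += 1"}})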
+
+    @query_params("requests_per_second")
+    def update_by_query_rethrottle(self, task_id, params=None, headers=None):
+        """
+        Changes the number of requests per second for a particular Update By Query
+        operation.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-update-by-query.html>`_
+
+        :arg task_id: The task id to rethrottle
+        :arg requests_per_second: The throttle to set on this request in
+            sub-requests per second, as a floating point value. -1 means no throttle.
+        """
+        if task_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'task_id'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_update_by_query", task_id, "_rethrottle"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def get_script_context(self, params=None, headers=None):
+        """
+        Returns all script contexts.
+        `<https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-contexts.html>`_
+        """
+        return self.transport.perform_request(
+            "GET", "/_script_context", params=params, headers=headers
+        )
+
+    @query_params()
+    def get_script_languages(self, params=None, headers=None):
+        """
+        Returns available script types, languages and contexts
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/modules-scripting.html>`_
+        """
+        return self.transport.perform_request(
+            "GET", "/_script_language", params=params, headers=headers
+        )
+
+    @query_params(
+        "ccs_minimize_roundtrips",
+        "max_concurrent_searches",
+        "rest_total_hits_as_int",
+        "search_type",
+        "typed_keys",
+    )
+    def msearch_template(
+        self, body, index=None, doc_type=None, params=None, headers=None
+    ):
+        """
+        Allows to execute several search template operations in one request.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/search-multi-search.html>`_
+
+        :arg body: The request definitions (metadata-search request
+            definition pairs), separated by newlines
+        :arg index: A comma-separated list of index names to use as
+            default
+        :arg doc_type: A comma-separated list of document types to use
+            as default
+        :arg ccs_minimize_roundtrips: Indicates whether network round-
+            trips should be minimized as part of cross-cluster search requests
+            execution  Default: true
+        :arg max_concurrent_searches: Controls the maximum number of
+            concurrent searches the multi search api will execute
+        :arg rest_total_hits_as_int: Indicates whether hits.total should
+            be rendered as an integer or an object in the rest search response
+        :arg search_type: Search operation type  Valid choices:
+            query_then_fetch, query_and_fetch, dfs_query_then_fetch,
+            dfs_query_and_fetch
+        :arg typed_keys: Specify whether aggregation and suggester names
+            should be prefixed by their respective types in the response
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        body = _bulk_body(self.transport.serializer, body)
+        return self.transport.perform_request(
+            "POST",
+            _make_path(index, doc_type, "_msearch", "template"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params(
+        "field_statistics",
+        "fields",
+        "ids",
+        "offsets",
+        "payloads",
+        "positions",
+        "preference",
+        "realtime",
+        "routing",
+        "term_statistics",
+        "version",
+        "version_type",
+    )
+    def mtermvectors(
+        self, body=None, index=None, doc_type=None, params=None, headers=None
+    ):
+        """
+        Returns multiple termvectors in one request.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-multi-termvectors.html>`_
+
+        :arg body: Define ids, documents, parameters or a list of
+            parameters per document here. You must at least provide a list of
+            document ids. See documentation.
+        :arg index: The index in which the document resides.
+        :arg doc_type: The type of the document.
+        :arg field_statistics: Specifies if document count, sum of
+            document frequencies and sum of total term frequencies should be
+            returned. Applies to all returned documents unless otherwise specified
+            in body "params" or "docs".  Default: True
+        :arg fields: A comma-separated list of fields to return. Applies
+            to all returned documents unless otherwise specified in body "params" or
+            "docs".
+        :arg ids: A comma-separated list of document ids. You must
+            define ids as a parameter or set "ids" or "docs" in the request body
+        :arg offsets: Specifies if term offsets should be returned.
+            Applies to all returned documents unless otherwise specified in body
+            "params" or "docs".  Default: True
+        :arg payloads: Specifies if term payloads should be returned.
+            Applies to all returned documents unless otherwise specified in body
+            "params" or "docs".  Default: True
+        :arg positions: Specifies if term positions should be returned.
+            Applies to all returned documents unless otherwise specified in body
+            "params" or "docs".  Default: True
+        :arg preference: Specify the node or shard the operation should
+            be performed on (default: random). Applies to all returned documents
+            unless otherwise specified in body "params" or "docs".
+        :arg realtime: Specifies if requests are real-time as opposed to
+            near-real-time (default: true).
+        :arg routing: Specific routing value. Applies to all returned
+            documents unless otherwise specified in body "params" or "docs".
+        :arg term_statistics: Specifies if total term frequency and
+            document frequency should be returned. Applies to all returned documents
+            unless otherwise specified in body "params" or "docs".
+        :arg version: Explicit version number for concurrency control
+        :arg version_type: Specific version type  Valid choices:
+            internal, external, external_gte, force
+        """
+        if doc_type in SKIP_IN_PATH:
+            path = _make_path(index, "_mtermvectors")
+        else:
+            path = _make_path(index, doc_type, "_mtermvectors")
+
+        return self.transport.perform_request(
+            "POST", path, params=params, headers=headers, body=body
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "ccs_minimize_roundtrips",
+        "expand_wildcards",
+        "explain",
+        "ignore_throttled",
+        "ignore_unavailable",
+        "preference",
+        "profile",
+        "rest_total_hits_as_int",
+        "routing",
+        "scroll",
+        "search_type",
+        "typed_keys",
+    )
+    def search_template(
+        self, body, index=None, doc_type=None, params=None, headers=None
+    ):
+        """
+        Allows to use the Mustache language to pre-render a search definition.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/search-template.html>`_
+
+        :arg body: The search definition template and its params
+        :arg index: A comma-separated list of index names to search; use
+            `_all` or empty string to perform the operation on all indices
+        :arg doc_type: A comma-separated list of document types to
+            search; leave empty to perform the operation on all types
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg ccs_minimize_roundtrips: Indicates whether network round-
+            trips should be minimized as part of cross-cluster search requests
+            execution  Default: true
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg explain: Specify whether to return detailed information
+            about score computation as part of a hit
+        :arg ignore_throttled: Whether specified concrete, expanded or
+            aliased indices should be ignored when throttled
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg preference: Specify the node or shard the operation should
+            be performed on (default: random)
+        :arg profile: Specify whether to profile the query execution
+        :arg rest_total_hits_as_int: Indicates whether hits.total should
+            be rendered as an integer or an object in the rest search response
+        :arg routing: A comma-separated list of specific routing values
+        :arg scroll: Specify how long a consistent view of the index
+            should be maintained for scrolled search
+        :arg search_type: Search operation type  Valid choices:
+            query_then_fetch, query_and_fetch, dfs_query_then_fetch,
+            dfs_query_and_fetch
+        :arg typed_keys: Specify whether aggregation and suggester names
+            should be prefixed by their respective types in the response
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path(index, doc_type, "_search", "template"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
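+
+    # Illustrative usage sketch (assumes a connected client ``es``; the template
+    # id, index and params are placeholders for a template stored via ``put_script``):
+    #
+    #   resp = es.search_template(index="my-index", body={
+    #       "id": "my-search-template",
+    #       "params": {"field": "title", "value": "moon"},
+    #   })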
+
+    @query_params(
+        "field_statistics",
+        "fields",
+        "offsets",
+        "payloads",
+        "positions",
+        "preference",
+        "realtime",
+        "routing",
+        "term_statistics",
+        "version",
+        "version_type",
+    )
+    def termvectors(
+        self, index, body=None, doc_type=None, id=None, params=None, headers=None
+    ):
+        """
+        Returns information and statistics about terms in the fields of a particular
+        document.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-termvectors.html>`_
+
+        :arg index: The index in which the document resides.
+        :arg body: Define parameters and/or supply a document to get
+            termvectors for. See documentation.
+        :arg doc_type: The type of the document.
+        :arg id: The id of the document; when not specified, a doc param
+            should be supplied.
+        :arg field_statistics: Specifies if document count, sum of
+            document frequencies and sum of total term frequencies should be
+            returned.  Default: True
+        :arg fields: A comma-separated list of fields to return.
+        :arg offsets: Specifies if term offsets should be returned.
+            Default: True
+        :arg payloads: Specifies if term payloads should be returned.
+            Default: True
+        :arg positions: Specifies if term positions should be returned.
+            Default: True
+        :arg preference: Specify the node or shard the operation should
+            be performed on (default: random).
+        :arg realtime: Specifies if request is real-time as opposed to
+            near-real-time (default: true).
+        :arg routing: Specific routing value.
+        :arg term_statistics: Specifies if total term frequency and
+            document frequency should be returned.
+        :arg version: Explicit version number for concurrency control
+        :arg version_type: Specific version type  Valid choices:
+            internal, external, external_gte, force
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        if doc_type in SKIP_IN_PATH:
+            path = _make_path(index, "_termvectors", id)
+        else:
+            path = _make_path(index, doc_type, id, "_termvectors")
+
+        return self.transport.perform_request(
+            "POST", path, params=params, headers=headers, body=body
+        )
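+
+    # Illustrative usage sketch (assumes a connected client ``es``; index, id and
+    # field names are placeholders):
+    #
+    #   tv = es.termvectors(index="my-index", id="1",
+    #                       fields=["title"], term_statistics=True)
+    #   terms = tv["term_vectors"]["title"]["terms"]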
+
+    @query_params(
+        "_source",
+        "_source_excludes",
+        "_source_includes",
+        "allow_no_indices",
+        "analyze_wildcard",
+        "analyzer",
+        "conflicts",
+        "default_operator",
+        "df",
+        "expand_wildcards",
+        "from_",
+        "ignore_unavailable",
+        "lenient",
+        "max_docs",
+        "pipeline",
+        "preference",
+        "q",
+        "refresh",
+        "request_cache",
+        "requests_per_second",
+        "routing",
+        "scroll",
+        "scroll_size",
+        "search_timeout",
+        "search_type",
+        "size",
+        "slices",
+        "sort",
+        "stats",
+        "terminate_after",
+        "timeout",
+        "version",
+        "version_type",
+        "wait_for_active_shards",
+        "wait_for_completion",
+    )
+    def update_by_query(
+        self, index, body=None, doc_type=None, params=None, headers=None
+    ):
+        """
+        Performs an update on every document in the index without changing the source,
+        for example to pick up a mapping change.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/docs-update-by-query.html>`_
+
+        :arg index: A comma-separated list of index names to search; use
+            `_all` or empty string to perform the operation on all indices
+        :arg body: The search definition using the Query DSL
+        :arg doc_type: A comma-separated list of document types to
+            search; leave empty to perform the operation on all types
+        :arg _source: True or false to return the _source field or not,
+            or a list of fields to return
+        :arg _source_excludes: A list of fields to exclude from the
+            returned _source field
+        :arg _source_includes: A list of fields to extract and return
+            from the _source field
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg analyze_wildcard: Specify whether wildcard and prefix
+            queries should be analyzed (default: false)
+        :arg analyzer: The analyzer to use for the query string
+        :arg conflicts: What to do when the update by query hits version
+            conflicts?  Valid choices: abort, proceed  Default: abort
+        :arg default_operator: The default operator for query string
+            query (AND or OR)  Valid choices: AND, OR  Default: OR
+        :arg df: The field to use as default where no field prefix is
+            given in the query string
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg from_: Starting offset (default: 0)
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg lenient: Specify whether format-based query failures (such
+            as providing text to a numeric field) should be ignored
+        :arg max_docs: Maximum number of documents to process (default:
+            all documents)
+        :arg pipeline: Ingest pipeline to set on index requests made by
+            this action. (default: none)
+        :arg preference: Specify the node or shard the operation should
+            be performed on (default: random)
+        :arg q: Query in the Lucene query string syntax
+        :arg refresh: Should the affected indexes be refreshed?
+        :arg request_cache: Specify if request cache should be used for
+            this request or not, defaults to index level setting
+        :arg requests_per_second: The throttle to set on this request in
+            sub-requests per second. -1 means no throttle.
+        :arg routing: A comma-separated list of specific routing values
+        :arg scroll: Specify how long a consistent view of the index
+            should be maintained for scrolled search
+        :arg scroll_size: Size on the scroll request powering the update
+            by query  Default: 100
+        :arg search_timeout: Explicit timeout for each search request.
+            Defaults to no timeout.
+        :arg search_type: Search operation type  Valid choices:
+            query_then_fetch, dfs_query_then_fetch
+        :arg size: Deprecated, please use `max_docs` instead
+        :arg slices: The number of slices this task should be divided
+            into. Defaults to 1, meaning the task isn't sliced into subtasks. Can be
+            set to `auto`.  Default: 1
+        :arg sort: A comma-separated list of <field>:<direction> pairs
+        :arg stats: Specific 'tag' of the request for logging and
+            statistical purposes
+        :arg terminate_after: The maximum number of documents to collect
+            for each shard, upon reaching which the query execution will terminate
+            early.
+        :arg timeout: Time each individual bulk request should wait for
+            shards that are unavailable.  Default: 1m
+        :arg version: Specify whether to return document version as part
+            of a hit
+        :arg version_type: Should the document increment the version
+            number (internal) on hit or not (reindex)
+        :arg wait_for_active_shards: Sets the number of shard copies
+            that must be active before proceeding with the update by query
+            operation. Defaults to 1, meaning the primary shard only. Set to `all`
+            for all shard copies, otherwise set to any non-negative value less than
+            or equal to the total number of copies for the shard (number of replicas
+            + 1)
+        :arg wait_for_completion: Should the request block until the
+            update by query operation is complete.  Default: True
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path(index, doc_type, "_update_by_query"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
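+
+    # Illustrative usage sketch (assumes a connected client ``es``; index, query
+    # and script are placeholders):
+    #
+    #   es.update_by_query(index="my-index", conflicts="proceed", body={
+    #       "query": {"term": {"status": "stale"}},
+    #       "script": {"source": "ctx._source.status = 'fresh'"},
+    #   })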
diff --git a/elasticsearch_7/client/__pycache__/__init__.cpython-38.pyc b/elasticsearch_7/client/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..102af0071fd9eec99219004d4cfe2f2b52d1304e
Binary files /dev/null and b/elasticsearch_7/client/__pycache__/__init__.cpython-38.pyc differ
diff --git a/elasticsearch_7/client/__pycache__/async_search.cpython-38.pyc b/elasticsearch_7/client/__pycache__/async_search.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..281fcf09a91ee128f3510dcd7133c90f138b9025
Binary files /dev/null and b/elasticsearch_7/client/__pycache__/async_search.cpython-38.pyc differ
diff --git a/elasticsearch_7/client/__pycache__/autoscaling.cpython-38.pyc b/elasticsearch_7/client/__pycache__/autoscaling.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9e304337ca6fdbb857420bca825bb2396acef6e4
Binary files /dev/null and b/elasticsearch_7/client/__pycache__/autoscaling.cpython-38.pyc differ
diff --git a/elasticsearch_7/client/__pycache__/cat.cpython-38.pyc b/elasticsearch_7/client/__pycache__/cat.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..21dbade42958c1a509a7cad205541d7835749889
Binary files /dev/null and b/elasticsearch_7/client/__pycache__/cat.cpython-38.pyc differ
diff --git a/elasticsearch_7/client/__pycache__/ccr.cpython-38.pyc b/elasticsearch_7/client/__pycache__/ccr.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..00a5d93515090f21c777d6edab0f01f73acad154
Binary files /dev/null and b/elasticsearch_7/client/__pycache__/ccr.cpython-38.pyc differ
diff --git a/elasticsearch_7/client/__pycache__/cluster.cpython-38.pyc b/elasticsearch_7/client/__pycache__/cluster.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c5df7a74d88ed7ebef3f949490b6b47cb371cb5c
Binary files /dev/null and b/elasticsearch_7/client/__pycache__/cluster.cpython-38.pyc differ
diff --git a/elasticsearch_7/client/__pycache__/data_frame.cpython-38.pyc b/elasticsearch_7/client/__pycache__/data_frame.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7d8ddb7373e2df04219fd905fce267faa24e2b82
Binary files /dev/null and b/elasticsearch_7/client/__pycache__/data_frame.cpython-38.pyc differ
diff --git a/elasticsearch_7/client/__pycache__/deprecation.cpython-38.pyc b/elasticsearch_7/client/__pycache__/deprecation.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ca1ac99919a870632d87a302eafb31526b5c18ad
Binary files /dev/null and b/elasticsearch_7/client/__pycache__/deprecation.cpython-38.pyc differ
diff --git a/elasticsearch_7/client/__pycache__/enrich.cpython-38.pyc b/elasticsearch_7/client/__pycache__/enrich.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0798d5076e4ea8fe80823064b7b7217519535640
Binary files /dev/null and b/elasticsearch_7/client/__pycache__/enrich.cpython-38.pyc differ
diff --git a/elasticsearch_7/client/__pycache__/eql.cpython-38.pyc b/elasticsearch_7/client/__pycache__/eql.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4d6a516374d2d55a1f7ea9344c59a7bae80491d4
Binary files /dev/null and b/elasticsearch_7/client/__pycache__/eql.cpython-38.pyc differ
diff --git a/elasticsearch_7/client/__pycache__/graph.cpython-38.pyc b/elasticsearch_7/client/__pycache__/graph.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dbf8b886e186f4ebba086ede0eb71db669bd6914
Binary files /dev/null and b/elasticsearch_7/client/__pycache__/graph.cpython-38.pyc differ
diff --git a/elasticsearch_7/client/__pycache__/ilm.cpython-38.pyc b/elasticsearch_7/client/__pycache__/ilm.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a2744f072fffddb0182e662cf8f529e5519e435b
Binary files /dev/null and b/elasticsearch_7/client/__pycache__/ilm.cpython-38.pyc differ
diff --git a/elasticsearch_7/client/__pycache__/indices.cpython-38.pyc b/elasticsearch_7/client/__pycache__/indices.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b8b08e918b2d4698783493206102bdf995d486c3
Binary files /dev/null and b/elasticsearch_7/client/__pycache__/indices.cpython-38.pyc differ
diff --git a/elasticsearch_7/client/__pycache__/ingest.cpython-38.pyc b/elasticsearch_7/client/__pycache__/ingest.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..768685e06ef4dfbc81375b1644df6f5c1de8c326
Binary files /dev/null and b/elasticsearch_7/client/__pycache__/ingest.cpython-38.pyc differ
diff --git a/elasticsearch_7/client/__pycache__/license.cpython-38.pyc b/elasticsearch_7/client/__pycache__/license.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..697e6044d0e4742612b72b526efb6c2edb682713
Binary files /dev/null and b/elasticsearch_7/client/__pycache__/license.cpython-38.pyc differ
diff --git a/elasticsearch_7/client/__pycache__/migration.cpython-38.pyc b/elasticsearch_7/client/__pycache__/migration.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..328c09bc5de339ea88642e12cc27826ef3a47508
Binary files /dev/null and b/elasticsearch_7/client/__pycache__/migration.cpython-38.pyc differ
diff --git a/elasticsearch_7/client/__pycache__/ml.cpython-38.pyc b/elasticsearch_7/client/__pycache__/ml.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..22a8a03dad3f479f9d0457672ae2e343b022c349
Binary files /dev/null and b/elasticsearch_7/client/__pycache__/ml.cpython-38.pyc differ
diff --git a/elasticsearch_7/client/__pycache__/monitoring.cpython-38.pyc b/elasticsearch_7/client/__pycache__/monitoring.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..206ef11050c52d746ee786a365e9aeeff7865337
Binary files /dev/null and b/elasticsearch_7/client/__pycache__/monitoring.cpython-38.pyc differ
diff --git a/elasticsearch_7/client/__pycache__/nodes.cpython-38.pyc b/elasticsearch_7/client/__pycache__/nodes.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6356a443c895b90fd9d021525d223e6c47c7775e
Binary files /dev/null and b/elasticsearch_7/client/__pycache__/nodes.cpython-38.pyc differ
diff --git a/elasticsearch_7/client/__pycache__/remote.cpython-38.pyc b/elasticsearch_7/client/__pycache__/remote.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..141490d2a2414090431c425349b109b6ecff8bb2
Binary files /dev/null and b/elasticsearch_7/client/__pycache__/remote.cpython-38.pyc differ
diff --git a/elasticsearch_7/client/__pycache__/rollup.cpython-38.pyc b/elasticsearch_7/client/__pycache__/rollup.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7c5a801ab20ad1df9d86b8ad83d8d8d64ef93746
Binary files /dev/null and b/elasticsearch_7/client/__pycache__/rollup.cpython-38.pyc differ
diff --git a/elasticsearch_7/client/__pycache__/searchable_snapshots.cpython-38.pyc b/elasticsearch_7/client/__pycache__/searchable_snapshots.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8436240bbc091fa3cce0662e9502fc25cb7b044c
Binary files /dev/null and b/elasticsearch_7/client/__pycache__/searchable_snapshots.cpython-38.pyc differ
diff --git a/elasticsearch_7/client/__pycache__/security.cpython-38.pyc b/elasticsearch_7/client/__pycache__/security.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c13ad8cd59ece03e92df0fc10c215d760190c912
Binary files /dev/null and b/elasticsearch_7/client/__pycache__/security.cpython-38.pyc differ
diff --git a/elasticsearch_7/client/__pycache__/slm.cpython-38.pyc b/elasticsearch_7/client/__pycache__/slm.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a5096ba61b841989a7327a239e8f98c4ed4ff705
Binary files /dev/null and b/elasticsearch_7/client/__pycache__/slm.cpython-38.pyc differ
diff --git a/elasticsearch_7/client/__pycache__/snapshot.cpython-38.pyc b/elasticsearch_7/client/__pycache__/snapshot.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8f0824afbeaf5e11ae40eee57c8e17c942704301
Binary files /dev/null and b/elasticsearch_7/client/__pycache__/snapshot.cpython-38.pyc differ
diff --git a/elasticsearch_7/client/__pycache__/sql.cpython-38.pyc b/elasticsearch_7/client/__pycache__/sql.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bc661d3ea0a2dbbc4136392acb7d8cb7a612c1a9
Binary files /dev/null and b/elasticsearch_7/client/__pycache__/sql.cpython-38.pyc differ
diff --git a/elasticsearch_7/client/__pycache__/ssl.cpython-38.pyc b/elasticsearch_7/client/__pycache__/ssl.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1ea7bc12d195ff98f388a3bbb61ba010a6fa9628
Binary files /dev/null and b/elasticsearch_7/client/__pycache__/ssl.cpython-38.pyc differ
diff --git a/elasticsearch_7/client/__pycache__/tasks.cpython-38.pyc b/elasticsearch_7/client/__pycache__/tasks.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2a6e1d488cc7a86d83dadbcb484174da994089f5
Binary files /dev/null and b/elasticsearch_7/client/__pycache__/tasks.cpython-38.pyc differ
diff --git a/elasticsearch_7/client/__pycache__/transform.cpython-38.pyc b/elasticsearch_7/client/__pycache__/transform.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8d69484deafb93c568e4b51b5ea3bdb0724df7e4
Binary files /dev/null and b/elasticsearch_7/client/__pycache__/transform.cpython-38.pyc differ
diff --git a/elasticsearch_7/client/__pycache__/utils.cpython-38.pyc b/elasticsearch_7/client/__pycache__/utils.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6b083042a4cb55dfcb7cecab646c5b43e7ec47eb
Binary files /dev/null and b/elasticsearch_7/client/__pycache__/utils.cpython-38.pyc differ
diff --git a/elasticsearch_7/client/__pycache__/watcher.cpython-38.pyc b/elasticsearch_7/client/__pycache__/watcher.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f6f5762b1802083799c2b33cd23c1479dea6bc55
Binary files /dev/null and b/elasticsearch_7/client/__pycache__/watcher.cpython-38.pyc differ
diff --git a/elasticsearch_7/client/__pycache__/xpack.cpython-38.pyc b/elasticsearch_7/client/__pycache__/xpack.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3903295974bbd0401c80274ab83ba0bd7ac7d1ae
Binary files /dev/null and b/elasticsearch_7/client/__pycache__/xpack.cpython-38.pyc differ
diff --git a/elasticsearch_7/client/async_search.py b/elasticsearch_7/client/async_search.py
new file mode 100644
index 0000000000000000000000000000000000000000..e156a7db8ea469fa24726ce3666e7fa801707e2f
--- /dev/null
+++ b/elasticsearch_7/client/async_search.py
@@ -0,0 +1,191 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, SKIP_IN_PATH, query_params, _make_path
+
+
+class AsyncSearchClient(NamespacedClient):
+    @query_params()
+    def delete(self, id, params=None, headers=None):
+        """
+        Deletes an async search by ID. If the search is still running, the search
+        request will be cancelled. Otherwise, the saved search results are deleted.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/async-search.html>`_
+
+        :arg id: The async search ID
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'id'.")
+
+        return self.transport.perform_request(
+            "DELETE", _make_path("_async_search", id), params=params, headers=headers
+        )
+
+    @query_params("keep_alive", "typed_keys", "wait_for_completion_timeout")
+    def get(self, id, params=None, headers=None):
+        """
+        Retrieves the results of a previously submitted async search request given its
+        ID.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/async-search.html>`_
+
+        :arg id: The async search ID
+        :arg keep_alive: Specify the time interval in which the results
+            (partial or final) for this search will be available
+        :arg typed_keys: Specify whether aggregation and suggester names
+            should be prefixed by their respective types in the response
+        :arg wait_for_completion_timeout: Specify the time that the
+            request should block waiting for the final response
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'id'.")
+
+        return self.transport.perform_request(
+            "GET", _make_path("_async_search", id), params=params, headers=headers
+        )
+
+    @query_params(
+        "_source",
+        "_source_excludes",
+        "_source_includes",
+        "allow_no_indices",
+        "allow_partial_search_results",
+        "analyze_wildcard",
+        "analyzer",
+        "batched_reduce_size",
+        "default_operator",
+        "df",
+        "docvalue_fields",
+        "expand_wildcards",
+        "explain",
+        "from_",
+        "ignore_throttled",
+        "ignore_unavailable",
+        "keep_alive",
+        "keep_on_completion",
+        "lenient",
+        "max_concurrent_shard_requests",
+        "preference",
+        "q",
+        "request_cache",
+        "routing",
+        "search_type",
+        "seq_no_primary_term",
+        "size",
+        "sort",
+        "stats",
+        "stored_fields",
+        "suggest_field",
+        "suggest_mode",
+        "suggest_size",
+        "suggest_text",
+        "terminate_after",
+        "timeout",
+        "track_scores",
+        "track_total_hits",
+        "typed_keys",
+        "version",
+        "wait_for_completion_timeout",
+    )
+    def submit(self, body=None, index=None, params=None, headers=None):
+        """
+        Executes a search request asynchronously.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/async-search.html>`_
+
+        :arg body: The search definition using the Query DSL
+        :arg index: A comma-separated list of index names to search; use
+            `_all` or empty string to perform the operation on all indices
+        :arg _source: True or false to return the _source field or not,
+            or a list of fields to return
+        :arg _source_excludes: A list of fields to exclude from the
+            returned _source field
+        :arg _source_includes: A list of fields to extract and return
+            from the _source field
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg allow_partial_search_results: Indicate if an error should
+            be returned if there is a partial search failure or timeout  Default:
+            True
+        :arg analyze_wildcard: Specify whether wildcard and prefix
+            queries should be analyzed (default: false)
+        :arg analyzer: The analyzer to use for the query string
+        :arg batched_reduce_size: The number of shard results that
+            should be reduced at once on the coordinating node. This value should be
+            used as the granularity at which progress results will be made
+            available.  Default: 5
+        :arg default_operator: The default operator for query string
+            query (AND or OR)  Valid choices: AND, OR  Default: OR
+        :arg df: The field to use as default where no field prefix is
+            given in the query string
+        :arg docvalue_fields: A comma-separated list of fields to return
+            as the docvalue representation of a field for each hit
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg explain: Specify whether to return detailed information
+            about score computation as part of a hit
+        :arg from_: Starting offset (default: 0)
+        :arg ignore_throttled: Whether specified concrete, expanded or
+            aliased indices should be ignored when throttled
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg keep_alive: Update the time interval in which the results
+            (partial or final) for this search will be available  Default: 5d
+        :arg keep_on_completion: Control whether the response should be
+            stored in the cluster if it completed within the provided
+            [wait_for_completion] time (default: false)
+        :arg lenient: Specify whether format-based query failures (such
+            as providing text to a numeric field) should be ignored
+        :arg max_concurrent_shard_requests: The number of concurrent
+            shard requests per node this search executes concurrently. This value
+            should be used to limit the impact of the search on the cluster in order
+            to limit the number of concurrent shard requests  Default: 5
+        :arg preference: Specify the node or shard the operation should
+            be performed on (default: random)
+        :arg q: Query in the Lucene query string syntax
+        :arg request_cache: Specify if request cache should be used for
+            this request or not, defaults to true
+        :arg routing: A comma-separated list of specific routing values
+        :arg search_type: Search operation type  Valid choices:
+            query_then_fetch, dfs_query_then_fetch
+        :arg seq_no_primary_term: Specify whether to return sequence
+            number and primary term of the last modification of each hit
+        :arg size: Number of hits to return (default: 10)
+        :arg sort: A comma-separated list of <field>:<direction> pairs
+        :arg stats: Specific 'tag' of the request for logging and
+            statistical purposes
+        :arg stored_fields: A comma-separated list of stored fields to
+            return as part of a hit
+        :arg suggest_field: Specify which field to use for suggestions
+        :arg suggest_mode: Specify suggest mode  Valid choices: missing,
+            popular, always  Default: missing
+        :arg suggest_size: How many suggestions to return in response
+        :arg suggest_text: The source text for which the suggestions
+            should be returned
+        :arg terminate_after: The maximum number of documents to collect
+            for each shard, upon reaching which the query execution will terminate
+            early.
+        :arg timeout: Explicit operation timeout
+        :arg track_scores: Whether to calculate and return scores even
+            if they are not used for sorting
+        :arg track_total_hits: Indicate if the number of documents that
+            match the query should be tracked
+        :arg typed_keys: Specify whether aggregation and suggester names
+            should be prefixed by their respective types in the response
+        :arg version: Specify whether to return document version as part
+            of a hit
+        :arg wait_for_completion_timeout: Specify the time that the
+            request should block waiting for the final response  Default: 1s
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path(index, "_async_search"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
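A minimal usage sketch for the AsyncSearchClient added above, assuming a local cluster and a hypothetical index pattern `logs-*`; note how `from_` is mapped onto the reserved `from` query parameter by the handling shown in `submit()`:

from elasticsearch_7 import Elasticsearch

es = Elasticsearch()  # assumed cluster on http://localhost:9200

# Submit an asynchronous search and keep the results once it completes.
resp = es.async_search.submit(
    index="logs-*",                        # hypothetical index pattern
    body={"query": {"match_all": {}}},
    wait_for_completion_timeout="1s",
    keep_on_completion=True,
    keep_alive="5m",
    from_=0,                               # translated to the 'from' parameter
    size=10,
)

search_id = resp.get("id")
if search_id:
    # Poll for the final results, then delete the stored search.
    results = es.async_search.get(search_id, wait_for_completion_timeout="2s")
    es.async_search.delete(search_id)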
diff --git a/elasticsearch_7/client/autoscaling.py b/elasticsearch_7/client/autoscaling.py
new file mode 100644
index 0000000000000000000000000000000000000000..3df7f5a9782add4742b751f763b53707cc7b075b
--- /dev/null
+++ b/elasticsearch_7/client/autoscaling.py
@@ -0,0 +1,75 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, SKIP_IN_PATH, _make_path
+
+
+class AutoscalingClient(NamespacedClient):
+    @query_params()
+    def get_autoscaling_decision(self, params=None, headers=None):
+        """
+        Gets the current autoscaling decision based on the configured autoscaling
+        policy, indicating whether or not autoscaling is needed.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-get-autoscaling-decision.html>`_
+        """
+        return self.transport.perform_request(
+            "GET", "/_autoscaling/decision", params=params, headers=headers
+        )
+
+    @query_params()
+    def delete_autoscaling_policy(self, name, params=None, headers=None):
+        """
+        Deletes an autoscaling policy.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-delete-autoscaling-policy.html>`_
+
+        :arg name: the name of the autoscaling policy
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return self.transport.perform_request(
+            "DELETE",
+            _make_path("_autoscaling", "policy", name),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def put_autoscaling_policy(self, name, body, params=None, headers=None):
+        """
+        Creates a new autoscaling policy.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-put-autoscaling-policy.html>`_
+
+        :arg name: the name of the autoscaling policy
+        :arg body: the specification of the autoscaling policy
+        """
+        for param in (name, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_autoscaling", "policy", name),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def get_autoscaling_policy(self, name, params=None, headers=None):
+        """
+        Retrieves an autoscaling policy.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-get-autoscaling-policy.html>`_
+
+        :arg name: the name of the autoscaling policy
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_autoscaling", "policy", name),
+            params=params,
+            headers=headers,
+        )
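A short sketch of how the AutoscalingClient methods might be called, assuming a client named `es` as above; the policy name and body are illustrative only, not a validated policy specification (the autoscaling API was still experimental in 7.8 and requires an appropriate license):

from elasticsearch_7 import Elasticsearch

es = Elasticsearch()

policy_name = "my_policy"                     # hypothetical policy name
policy_body = {"policy": {"deciders": {}}}    # illustrative body only

es.autoscaling.put_autoscaling_policy(policy_name, policy_body)
print(es.autoscaling.get_autoscaling_policy(policy_name))
print(es.autoscaling.get_autoscaling_decision())
es.autoscaling.delete_autoscaling_policy(policy_name)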
diff --git a/elasticsearch_7/client/cat.py b/elasticsearch_7/client/cat.py
new file mode 100644
index 0000000000000000000000000000000000000000..441e2e88b29d11a7aed7d4a2ddf8897770ae0552
--- /dev/null
+++ b/elasticsearch_7/client/cat.py
@@ -0,0 +1,724 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path
+
+
+class CatClient(NamespacedClient):
+    @query_params("expand_wildcards", "format", "h", "help", "local", "s", "v")
+    def aliases(self, name=None, params=None, headers=None):
+        """
+        Shows information about currently configured aliases to indices including
+        filter and routing infos.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-alias.html>`_
+
+        :arg name: A comma-separated list of alias names to return
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: all
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg v: Verbose mode. Display column headers
+        """
+        return self.transport.perform_request(
+            "GET", _make_path("_cat", "aliases", name), params=params, headers=headers
+        )
+
+    @query_params("bytes", "format", "h", "help", "local", "master_timeout", "s", "v")
+    def allocation(self, node_id=None, params=None, headers=None):
+        """
+        Provides a snapshot of how many shards are allocated to each data node and how
+        much disk space they are using.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-allocation.html>`_
+
+        :arg node_id: A comma-separated list of node IDs or names to
+            limit the returned information
+        :arg bytes: The unit in which to display byte values  Valid
+            choices: b, k, kb, m, mb, g, gb, t, tb, p, pb
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg v: Verbose mode. Display column headers
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_cat", "allocation", node_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("format", "h", "help", "s", "v")
+    def count(self, index=None, params=None, headers=None):
+        """
+        Provides quick access to the document count of the entire cluster, or
+        individual indices.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-count.html>`_
+
+        :arg index: A comma-separated list of index names to limit the
+            returned information
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg v: Verbose mode. Display column headers
+        """
+        return self.transport.perform_request(
+            "GET", _make_path("_cat", "count", index), params=params, headers=headers
+        )
+
+    @query_params("format", "h", "help", "s", "time", "ts", "v")
+    def health(self, params=None, headers=None):
+        """
+        Returns a concise representation of the cluster health.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-health.html>`_
+
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg time: The unit in which to display time values  Valid
+            choices: d, h, m, s, ms, micros, nanos
+        :arg ts: Set to false to disable timestamping  Default: True
+        :arg v: Verbose mode. Display column headers
+        """
+        return self.transport.perform_request(
+            "GET", "/_cat/health", params=params, headers=headers
+        )
+
+    @query_params("help", "s")
+    def help(self, params=None, headers=None):
+        """
+        Returns help for the Cat APIs.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat.html>`_
+
+        :arg help: Return help information
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        """
+        return self.transport.perform_request(
+            "GET", "/_cat", params=params, headers=headers
+        )
+
+    @query_params(
+        "bytes",
+        "expand_wildcards",
+        "format",
+        "h",
+        "health",
+        "help",
+        "include_unloaded_segments",
+        "local",
+        "master_timeout",
+        "pri",
+        "s",
+        "time",
+        "v",
+    )
+    def indices(self, index=None, params=None, headers=None):
+        """
+        Returns information about indices: number of primaries and replicas, document
+        counts, disk size, ...
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-indices.html>`_
+
+        :arg index: A comma-separated list of index names to limit the
+            returned information
+        :arg bytes: The unit in which to display byte values  Valid
+            choices: b, k, kb, m, mb, g, gb, t, tb, p, pb
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: all
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg health: A health status ("green", "yellow", or "red") to
+            filter only indices matching the specified health status  Valid choices:
+            green, yellow, red
+        :arg help: Return help information
+        :arg include_unloaded_segments: If set to true segment stats
+            will include stats for segments that are not currently loaded into
+            memory
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg pri: Set to true to return stats only for primary shards
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg time: The unit in which to display time values  Valid
+            choices: d, h, m, s, ms, micros, nanos
+        :arg v: Verbose mode. Display column headers
+        """
+        return self.transport.perform_request(
+            "GET", _make_path("_cat", "indices", index), params=params, headers=headers
+        )
+
+    @query_params("format", "h", "help", "local", "master_timeout", "s", "v")
+    def master(self, params=None, headers=None):
+        """
+        Returns information about the master node.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-master.html>`_
+
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg v: Verbose mode. Display column headers
+        """
+        return self.transport.perform_request(
+            "GET", "/_cat/master", params=params, headers=headers
+        )
+
+    @query_params(
+        "bytes",
+        "format",
+        "full_id",
+        "h",
+        "help",
+        "local",
+        "master_timeout",
+        "s",
+        "time",
+        "v",
+    )
+    def nodes(self, params=None, headers=None):
+        """
+        Returns basic statistics about performance of cluster nodes.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-nodes.html>`_
+
+        :arg bytes: The unit in which to display byte values  Valid
+            choices: b, k, kb, m, mb, g, gb, t, tb, p, pb
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg full_id: Return the full node ID instead of the shortened
+            version (default: false)
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg local: Calculate the selected nodes using the local cluster
+            state rather than the state from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg time: The unit in which to display time values  Valid
+            choices: d, h, m, s, ms, micros, nanos
+        :arg v: Verbose mode. Display column headers
+        """
+        return self.transport.perform_request(
+            "GET", "/_cat/nodes", params=params, headers=headers
+        )
+
+    @query_params(
+        "active_only", "bytes", "detailed", "format", "h", "help", "s", "time", "v"
+    )
+    def recovery(self, index=None, params=None, headers=None):
+        """
+        Returns information about index shard recoveries, both on-going and completed.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-recovery.html>`_
+
+        :arg index: Comma-separated list or wildcard expression of index
+            names to limit the returned information
+        :arg active_only: If `true`, the response only includes ongoing
+            shard recoveries
+        :arg bytes: The unit in which to display byte values  Valid
+            choices: b, k, kb, m, mb, g, gb, t, tb, p, pb
+        :arg detailed: If `true`, the response includes detailed
+            information about shard recoveries
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg time: The unit in which to display time values  Valid
+            choices: d, h, m, s, ms, micros, nanos
+        :arg v: Verbose mode. Display column headers
+        """
+        return self.transport.perform_request(
+            "GET", _make_path("_cat", "recovery", index), params=params, headers=headers
+        )
+
+    @query_params(
+        "bytes", "format", "h", "help", "local", "master_timeout", "s", "time", "v"
+    )
+    def shards(self, index=None, params=None, headers=None):
+        """
+        Provides a detailed view of shard allocation on nodes.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-shards.html>`_
+
+        :arg index: A comma-separated list of index names to limit the
+            returned information
+        :arg bytes: The unit in which to display byte values  Valid
+            choices: b, k, kb, m, mb, g, gb, t, tb, p, pb
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg time: The unit in which to display time values  Valid
+            choices: d, h, m, s, ms, micros, nanos
+        :arg v: Verbose mode. Display column headers
+        """
+        return self.transport.perform_request(
+            "GET", _make_path("_cat", "shards", index), params=params, headers=headers
+        )
+
+    @query_params("bytes", "format", "h", "help", "s", "v")
+    def segments(self, index=None, params=None, headers=None):
+        """
+        Provides low-level information about the segments in the shards of an index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-segments.html>`_
+
+        :arg index: A comma-separated list of index names to limit the
+            returned information
+        :arg bytes: The unit in which to display byte values  Valid
+            choices: b, k, kb, m, mb, g, gb, t, tb, p, pb
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg v: Verbose mode. Display column headers
+        """
+        return self.transport.perform_request(
+            "GET", _make_path("_cat", "segments", index), params=params, headers=headers
+        )
+
+    @query_params("format", "h", "help", "local", "master_timeout", "s", "time", "v")
+    def pending_tasks(self, params=None, headers=None):
+        """
+        Returns a concise representation of the cluster pending tasks.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-pending-tasks.html>`_
+
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg time: The unit in which to display time values  Valid
+            choices: d, h, m, s, ms, micros, nanos
+        :arg v: Verbose mode. Display column headers
+        """
+        return self.transport.perform_request(
+            "GET", "/_cat/pending_tasks", params=params, headers=headers
+        )
+
+    @query_params("format", "h", "help", "local", "master_timeout", "s", "size", "v")
+    def thread_pool(self, thread_pool_patterns=None, params=None, headers=None):
+        """
+        Returns cluster-wide thread pool statistics per node. By default the active,
+        queue and rejected statistics are returned for all thread pools.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-thread-pool.html>`_
+
+        :arg thread_pool_patterns: A comma-separated list of regular
+            expressions to filter the thread pools in the output
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg size: The multiplier in which to display values  Valid
+            choices: , k, m, g, t, p
+        :arg v: Verbose mode. Display column headers
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_cat", "thread_pool", thread_pool_patterns),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("bytes", "format", "h", "help", "s", "v")
+    def fielddata(self, fields=None, params=None, headers=None):
+        """
+        Shows how much heap memory is currently being used by fielddata on every data
+        node in the cluster.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-fielddata.html>`_
+
+        :arg fields: A comma-separated list of fields to return in the
+            output
+        :arg bytes: The unit in which to display byte values  Valid
+            choices: b, k, kb, m, mb, g, gb, t, tb, p, pb
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg v: Verbose mode. Display column headers
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_cat", "fielddata", fields),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("format", "h", "help", "local", "master_timeout", "s", "v")
+    def plugins(self, params=None, headers=None):
+        """
+        Returns information about installed plugins across nodes.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-plugins.html>`_
+
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg v: Verbose mode. Display column headers
+        """
+        return self.transport.perform_request(
+            "GET", "/_cat/plugins", params=params, headers=headers
+        )
+
+    @query_params("format", "h", "help", "local", "master_timeout", "s", "v")
+    def nodeattrs(self, params=None, headers=None):
+        """
+        Returns information about custom node attributes.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-nodeattrs.html>`_
+
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg v: Verbose mode. Display column headers
+        """
+        return self.transport.perform_request(
+            "GET", "/_cat/nodeattrs", params=params, headers=headers
+        )
+
+    @query_params("format", "h", "help", "local", "master_timeout", "s", "v")
+    def repositories(self, params=None, headers=None):
+        """
+        Returns information about snapshot repositories registered in the cluster.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-repositories.html>`_
+
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg local: Return local information, do not retrieve the state
+            from master node
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg v: Verbose mode. Display column headers
+        """
+        return self.transport.perform_request(
+            "GET", "/_cat/repositories", params=params, headers=headers
+        )
+
+    @query_params(
+        "format", "h", "help", "ignore_unavailable", "master_timeout", "s", "time", "v"
+    )
+    def snapshots(self, repository=None, params=None, headers=None):
+        """
+        Returns all snapshots in a specific repository.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-snapshots.html>`_
+
+        :arg repository: Name of repository from which to fetch the
+            snapshot information
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg ignore_unavailable: Set to true to ignore unavailable
+            snapshots
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg time: The unit in which to display time values  Valid
+            choices: d, h, m, s, ms, micros, nanos
+        :arg v: Verbose mode. Display column headers
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_cat", "snapshots", repository),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "actions",
+        "detailed",
+        "format",
+        "h",
+        "help",
+        "node_id",
+        "parent_task",
+        "s",
+        "time",
+        "v",
+    )
+    def tasks(self, params=None, headers=None):
+        """
+        Returns information about the tasks currently executing on one or more nodes in
+        the cluster.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/tasks.html>`_
+
+        :arg actions: A comma-separated list of actions that should be
+            returned. Leave empty to return all.
+        :arg detailed: Return detailed task information (default: false)
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg node_id: A comma-separated list of node IDs or names to
+            limit the returned information; use `_local` to return information from
+            the node you're connecting to, leave empty to get information from all
+            nodes
+        :arg parent_task: Return tasks with specified parent task id.
+            Set to -1 to return all.
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg time: The unit in which to display time values  Valid
+            choices: d, h, m, s, ms, micros, nanos
+        :arg v: Verbose mode. Display column headers
+        """
+        return self.transport.perform_request(
+            "GET", "/_cat/tasks", params=params, headers=headers
+        )
+
+    @query_params("format", "h", "help", "local", "master_timeout", "s", "v")
+    def templates(self, name=None, params=None, headers=None):
+        """
+        Returns information about existing templates.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-templates.html>`_
+
+        :arg name: A pattern that returned template names must match
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg v: Verbose mode. Display column headers
+        """
+        return self.transport.perform_request(
+            "GET", _make_path("_cat", "templates", name), params=params, headers=headers
+        )
+
+    @query_params("allow_no_match", "bytes", "format", "h", "help", "s", "time", "v")
+    def ml_data_frame_analytics(self, id=None, params=None, headers=None):
+        """
+        Gets configuration and usage information about data frame analytics jobs.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-dfanalytics.html>`_
+
+        :arg id: The ID of the data frame analytics to fetch
+        :arg allow_no_match: Whether to ignore if a wildcard expression
+            matches no configs. (This includes `_all` string or when no configs have
+            been specified)
+        :arg bytes: The unit in which to display byte values  Valid
+            choices: b, k, kb, m, mb, g, gb, t, tb, p, pb
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg time: The unit in which to display time values  Valid
+            choices: d, h, m, s, ms, micros, nanos
+        :arg v: Verbose mode. Display column headers
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_cat", "ml", "data_frame", "analytics", id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("allow_no_datafeeds", "format", "h", "help", "s", "time", "v")
+    def ml_datafeeds(self, datafeed_id=None, params=None, headers=None):
+        """
+        Gets configuration and usage information about datafeeds.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-datafeeds.html>`_
+
+        :arg datafeed_id: The ID of the datafeeds stats to fetch
+        :arg allow_no_datafeeds: Whether to ignore if a wildcard
+            expression matches no datafeeds. (This includes `_all` string or when no
+            datafeeds have been specified)
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg time: The unit in which to display time values  Valid
+            choices: d, h, m, s, ms, micros, nanos
+        :arg v: Verbose mode. Display column headers
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_cat", "ml", "datafeeds", datafeed_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("allow_no_jobs", "bytes", "format", "h", "help", "s", "time", "v")
+    def ml_jobs(self, job_id=None, params=None, headers=None):
+        """
+        Gets configuration and usage information about anomaly detection jobs.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-anomaly-detectors.html>`_
+
+        :arg job_id: The ID of the jobs stats to fetch
+        :arg allow_no_jobs: Whether to ignore if a wildcard expression
+            matches no jobs. (This includes `_all` string or when no jobs have been
+            specified)
+        :arg bytes: The unit in which to display byte values  Valid
+            choices: b, k, kb, m, mb, g, gb, t, tb, p, pb
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg time: The unit in which to display time values  Valid
+            choices: d, h, m, s, ms, micros, nanos
+        :arg v: Verbose mode. Display column headers
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_cat", "ml", "anomaly_detectors", job_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "allow_no_match",
+        "bytes",
+        "format",
+        "from_",
+        "h",
+        "help",
+        "s",
+        "size",
+        "time",
+        "v",
+    )
+    def ml_trained_models(self, model_id=None, params=None, headers=None):
+        """
+        Gets configuration and usage information about inference trained models.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-trained-model.html>`_
+
+        :arg model_id: The ID of the trained models stats to fetch
+        :arg allow_no_match: Whether to ignore if a wildcard expression
+            matches no trained models. (This includes `_all` string or when no
+            trained models have been specified)  Default: True
+        :arg bytes: The unit in which to display byte values  Valid
+            choices: b, k, kb, m, mb, g, gb, t, tb, p, pb
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg from_: skips a number of trained models
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg size: specifies a max number of trained models to get
+            Default: 100
+        :arg time: The unit in which to display time values  Valid
+            choices: d, h, m, s, ms, micros, nanos
+        :arg v: Verbose mode. Display column headers
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_cat", "ml", "trained_models", model_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "allow_no_match", "format", "from_", "h", "help", "s", "size", "time", "v"
+    )
+    def transforms(self, transform_id=None, params=None, headers=None):
+        """
+        Gets configuration and usage information about transforms.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cat-transforms.html>`_
+
+        :arg transform_id: The id of the transform for which to get
+            stats. '_all' or '*' implies all transforms
+        :arg allow_no_match: Whether to ignore if a wildcard expression
+            matches no transforms. (This includes `_all` string or when no
+            transforms have been specified)
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        :arg from_: skips a number of transform configs, defaults to 0
+        :arg h: Comma-separated list of column names to display
+        :arg help: Return help information
+        :arg s: Comma-separated list of column names or column aliases
+            to sort by
+        :arg size: specifies a max number of transforms to get, defaults
+            to 100
+        :arg time: The unit in which to display time values  Valid
+            choices: d, h, m, s, ms, micros, nanos
+        :arg v: Verbose mode. Display column headers
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_cat", "transforms", transform_id),
+            params=params,
+            headers=headers,
+        )
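A brief usage sketch for the CatClient above, assuming a client named `es`; the column names and the paging loop are illustrative, and `from_` is mapped onto the reserved `from` query parameter exactly as the code above does:

from elasticsearch_7 import Elasticsearch

es = Elasticsearch()

# Plain-text table with column headers, as on the command line.
print(es.cat.indices(v=True))

# JSON rows with a restricted, sorted set of columns (hypothetical selection).
rows = es.cat.indices(
    format="json",
    h="index,health,docs.count,store.size",
    s="store.size:desc",
    bytes="mb",
)

# Page through transforms using the from_/size parameters.
offset, page_size = 0, 50
while True:
    page = es.cat.transforms(format="json", from_=offset, size=page_size)
    if not page:
        break
    for row in page:
        print(row.get("id"))
    offset += page_size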
diff --git a/elasticsearch_7/client/ccr.py b/elasticsearch_7/client/ccr.py
new file mode 100644
index 0000000000000000000000000000000000000000..4418c82c6efd6c23178f8b4e6392265c36aef191
--- /dev/null
+++ b/elasticsearch_7/client/ccr.py
@@ -0,0 +1,259 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class CcrClient(NamespacedClient):
+    @query_params()
+    def delete_auto_follow_pattern(self, name, params=None, headers=None):
+        """
+        Deletes auto-follow patterns.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ccr-delete-auto-follow-pattern.html>`_
+
+        :arg name: The name of the auto follow pattern.
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return self.transport.perform_request(
+            "DELETE",
+            _make_path("_ccr", "auto_follow", name),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("wait_for_active_shards")
+    def follow(self, index, body, params=None, headers=None):
+        """
+        Creates a new follower index configured to follow the referenced leader index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ccr-put-follow.html>`_
+
+        :arg index: The name of the follower index
+        :arg body: The name of the leader index and other optional ccr
+            related parameters
+        :arg wait_for_active_shards: Sets the number of shard copies
+            that must be active before returning. Defaults to 0. Set to `all` for
+            all shard copies, otherwise set to any non-negative value less than or
+            equal to the total number of copies for the shard (number of replicas +
+            1)  Default: 0
+        """
+        for param in (index, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path(index, "_ccr", "follow"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def follow_info(self, index, params=None, headers=None):
+        """
+        Retrieves information about all follower indices, including parameters and
+        status for each follower index
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ccr-get-follow-info.html>`_
+
+        :arg index: A comma-separated list of index patterns; use `_all`
+            to perform the operation on all indices
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return self.transport.perform_request(
+            "GET", _make_path(index, "_ccr", "info"), params=params, headers=headers
+        )
+
+    @query_params()
+    def follow_stats(self, index, params=None, headers=None):
+        """
+        Retrieves follower stats. Returns shard-level stats about the following tasks
+        associated with each shard for the specified indices.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ccr-get-follow-stats.html>`_
+
+        :arg index: A comma-separated list of index patterns; use `_all`
+            to perform the operation on all indices
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return self.transport.perform_request(
+            "GET", _make_path(index, "_ccr", "stats"), params=params, headers=headers
+        )
+
+    @query_params()
+    def forget_follower(self, index, body, params=None, headers=None):
+        """
+        Removes the follower retention leases from the leader.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ccr-post-forget-follower.html>`_
+
+        :arg index: the name of the leader index for which specified
+            follower retention leases should be removed
+        :arg body: the name and UUID of the follower index, the name of
+            the cluster containing the follower index, and the alias from the
+            perspective of that cluster for the remote cluster containing the leader
+            index
+        """
+        for param in (index, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path(index, "_ccr", "forget_follower"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def get_auto_follow_pattern(self, name=None, params=None, headers=None):
+        """
+        Gets configured auto-follow patterns. Returns the specified auto-follow pattern
+        collection.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ccr-get-auto-follow-pattern.html>`_
+
+        :arg name: The name of the auto follow pattern.
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_ccr", "auto_follow", name),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def pause_follow(self, index, params=None, headers=None):
+        """
+        Pauses a follower index. The follower index will not fetch any additional
+        operations from the leader index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ccr-post-pause-follow.html>`_
+
+        :arg index: The name of the follower index that should pause
+            following its leader index.
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path(index, "_ccr", "pause_follow"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def put_auto_follow_pattern(self, name, body, params=None, headers=None):
+        """
+        Creates a new named collection of auto-follow patterns against a specified
+        remote cluster. Newly created indices on the remote cluster matching any of the
+        specified patterns will be automatically configured as follower indices.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ccr-put-auto-follow-pattern.html>`_
+
+        :arg name: The name of the auto follow pattern.
+        :arg body: The specification of the auto follow pattern
+        """
+        for param in (name, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_ccr", "auto_follow", name),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def resume_follow(self, index, body=None, params=None, headers=None):
+        """
+        Resumes a follower index that has been paused
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ccr-post-resume-follow.html>`_
+
+        :arg index: The name of the follow index to resume following.
+        :arg body: The name of the leader index and other optional ccr
+            related parameters
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path(index, "_ccr", "resume_follow"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def stats(self, params=None, headers=None):
+        """
+        Gets all stats related to cross-cluster replication.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ccr-get-stats.html>`_
+        """
+        return self.transport.perform_request(
+            "GET", "/_ccr/stats", params=params, headers=headers
+        )
+
+    @query_params()
+    def unfollow(self, index, params=None, headers=None):
+        """
+        Stops the following task associated with a follower index and removes index
+        metadata and settings associated with cross-cluster replication.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ccr-post-unfollow.html>`_
+
+        :arg index: The name of the follower index that should be turned
+            into a regular index.
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path(index, "_ccr", "unfollow"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def pause_auto_follow_pattern(self, name, params=None, headers=None):
+        """
+        Pauses an auto-follow pattern
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ccr-pause-auto-follow-pattern.html>`_
+
+        :arg name: The name of the auto follow pattern that should pause
+            discovering new indices to follow.
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_ccr", "auto_follow", name, "pause"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def resume_auto_follow_pattern(self, name, params=None, headers=None):
+        """
+        Resumes an auto-follow pattern that has been paused
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ccr-resume-auto-follow-pattern.html>`_
+
+        :arg name: The name of the auto follow pattern to resume
+            discovering new indices to follow.
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_ccr", "auto_follow", name, "resume"),
+            params=params,
+            headers=headers,
+        )
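A sketch of a typical cross-cluster replication flow using the CcrClient above, assuming a remote cluster alias `leader_cluster` is already configured on the follower cluster; the index names are hypothetical:

from elasticsearch_7 import Elasticsearch

es = Elasticsearch()  # client pointed at the follower cluster

# Start following a leader index on the configured remote cluster.
es.ccr.follow(
    index="logs-follower",                    # hypothetical follower index
    body={
        "remote_cluster": "leader_cluster",   # assumed remote cluster alias
        "leader_index": "logs-leader",        # hypothetical leader index
    },
    wait_for_active_shards="1",
)

print(es.ccr.follow_info("logs-follower"))
print(es.ccr.follow_stats("logs-follower"))

# Pause and later resume replication.
es.ccr.pause_follow("logs-follower")
es.ccr.resume_follow("logs-follower")

# To convert the follower back to a regular index, pause it, close it,
# then call unfollow (the index management calls are omitted here).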
diff --git a/elasticsearch_7/client/cluster.py b/elasticsearch_7/client/cluster.py
new file mode 100644
index 0000000000000000000000000000000000000000..7e2de22eaf9f149598d8fd2f7c68cb3d37d9e749
--- /dev/null
+++ b/elasticsearch_7/client/cluster.py
@@ -0,0 +1,361 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class ClusterClient(NamespacedClient):
+    @query_params(
+        "expand_wildcards",
+        "level",
+        "local",
+        "master_timeout",
+        "timeout",
+        "wait_for_active_shards",
+        "wait_for_events",
+        "wait_for_no_initializing_shards",
+        "wait_for_no_relocating_shards",
+        "wait_for_nodes",
+        "wait_for_status",
+    )
+    def health(self, index=None, params=None, headers=None):
+        """
+        Returns basic information about the health of the cluster.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cluster-health.html>`_
+
+        :arg index: Limit the information returned to a specific index
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: all
+        :arg level: Specify the level of detail for returned information
+            Valid choices: cluster, indices, shards  Default: cluster
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_active_shards: Wait until the specified number of
+            shards is active
+        :arg wait_for_events: Wait until all currently queued events
+            with the given priority are processed  Valid choices: immediate, urgent,
+            high, normal, low, languid
+        :arg wait_for_no_initializing_shards: Whether to wait until
+            there are no initializing shards in the cluster
+        :arg wait_for_no_relocating_shards: Whether to wait until there
+            are no relocating shards in the cluster
+        :arg wait_for_nodes: Wait until the specified number of nodes is
+            available
+        :arg wait_for_status: Wait until cluster is in a specific state
+            Valid choices: green, yellow, red
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_cluster", "health", index),
+            params=params,
+            headers=headers,
+        )
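+
+    # Illustrative usage (a sketch; `es` is an assumed Elasticsearch() client):
+    # block until the cluster reaches at least yellow health.
+    #
+    #     es.cluster.health(wait_for_status="yellow", timeout="30s")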
+
+    @query_params("local", "master_timeout")
+    def pending_tasks(self, params=None, headers=None):
+        """
+        Returns a list of any cluster-level changes (e.g. create index, update mapping,
+        allocate or fail shard) which have not yet been executed.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cluster-pending.html>`_
+
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Specify timeout for connection to master
+        """
+        return self.transport.perform_request(
+            "GET", "/_cluster/pending_tasks", params=params, headers=headers
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "flat_settings",
+        "ignore_unavailable",
+        "local",
+        "master_timeout",
+        "wait_for_metadata_version",
+        "wait_for_timeout",
+    )
+    def state(self, metric=None, index=None, params=None, headers=None):
+        """
+        Returns comprehensive information about the state of the cluster.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cluster-state.html>`_
+
+        :arg metric: Limit the information returned to the specified
+            metrics  Valid choices: _all, blocks, metadata, nodes, routing_table,
+            routing_nodes, master_node, version
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string to perform the operation on all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg flat_settings: Return settings in flat format (default:
+            false)
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Specify timeout for connection to master
+        :arg wait_for_metadata_version: Wait for the metadata version to
+            be equal to or greater than the specified metadata version
+        :arg wait_for_timeout: The maximum time to wait for
+            wait_for_metadata_version before timing out
+        """
+        if index and metric in SKIP_IN_PATH:
+            metric = "_all"
+
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_cluster", "state", metric, index),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("flat_settings", "timeout")
+    def stats(self, node_id=None, params=None, headers=None):
+        """
+        Returns a high-level overview of cluster statistics.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cluster-stats.html>`_
+
+        :arg node_id: A comma-separated list of node IDs or names to
+            limit the returned information; use `_local` to return information from
+            the node you're connecting to, leave empty to get information from all
+            nodes
+        :arg flat_settings: Return settings in flat format (default:
+            false)
+        :arg timeout: Explicit operation timeout
+        """
+        return self.transport.perform_request(
+            "GET",
+            "/_cluster/stats"
+            if node_id in SKIP_IN_PATH
+            else _make_path("_cluster", "stats", "nodes", node_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "dry_run", "explain", "master_timeout", "metric", "retry_failed", "timeout"
+    )
+    def reroute(self, body=None, params=None, headers=None):
+        """
+        Allows manual changes to the allocation of individual shards in the cluster.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cluster-reroute.html>`_
+
+        :arg body: The definition of `commands` to perform (`move`,
+            `cancel`, `allocate`)
+        :arg dry_run: Simulate the operation only and return the
+            resulting state
+        :arg explain: Return an explanation of why the commands can or
+            cannot be executed
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg metric: Limit the information returned to the specified
+            metrics. Defaults to all but metadata  Valid choices: _all, blocks,
+            metadata, nodes, routing_table, master_node, version
+        :arg retry_failed: Retries allocation of shards that are blocked
+            due to too many subsequent allocation failures
+        :arg timeout: Explicit operation timeout
+        """
+        return self.transport.perform_request(
+            "POST", "/_cluster/reroute", params=params, headers=headers, body=body
+        )
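+
+    # Illustrative usage (a sketch; `es` is an assumed Elasticsearch() client,
+    # index and node names are placeholders):
+    #
+    #     es.cluster.reroute(body={
+    #         "commands": [{"move": {
+    #             "index": "test", "shard": 0,
+    #             "from_node": "node-1", "to_node": "node-2",
+    #         }}]
+    #     }, dry_run=True)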
+
+    @query_params("flat_settings", "include_defaults", "master_timeout", "timeout")
+    def get_settings(self, params=None, headers=None):
+        """
+        Returns cluster settings.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cluster-update-settings.html>`_
+
+        :arg flat_settings: Return settings in flat format (default:
+            false)
+        :arg include_defaults: Whether to return all default cluster
+            settings.
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg timeout: Explicit operation timeout
+        """
+        return self.transport.perform_request(
+            "GET", "/_cluster/settings", params=params, headers=headers
+        )
+
+    @query_params("flat_settings", "master_timeout", "timeout")
+    def put_settings(self, body, params=None, headers=None):
+        """
+        Updates the cluster settings.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cluster-update-settings.html>`_
+
+        :arg body: The settings to be updated. Can be either `transient`
+            or `persistent` (survives cluster restart).
+        :arg flat_settings: Return settings in flat format (default:
+            false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg timeout: Explicit operation timeout
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return self.transport.perform_request(
+            "PUT", "/_cluster/settings", params=params, headers=headers, body=body
+        )
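+
+    # Illustrative usage (a sketch; `es` is an assumed Elasticsearch() client):
+    # persist a routing-allocation setting across cluster restarts.
+    #
+    #     es.cluster.put_settings(body={
+    #         "persistent": {"cluster.routing.allocation.enable": "all"}
+    #     })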
+
+    @query_params()
+    def remote_info(self, params=None, headers=None):
+        """
+        Returns the information about configured remote clusters.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cluster-remote-info.html>`_
+        """
+        return self.transport.perform_request(
+            "GET", "/_remote/info", params=params, headers=headers
+        )
+
+    @query_params("include_disk_info", "include_yes_decisions")
+    def allocation_explain(self, body=None, params=None, headers=None):
+        """
+        Provides explanations for shard allocations in the cluster.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cluster-allocation-explain.html>`_
+
+        :arg body: The index, shard, and primary flag to explain. Empty
+            means 'explain the first unassigned shard'
+        :arg include_disk_info: Return information about disk usage and
+            shard sizes (default: false)
+        :arg include_yes_decisions: Return 'YES' decisions in
+            explanation (default: false)
+        """
+        return self.transport.perform_request(
+            "POST",
+            "/_cluster/allocation/explain",
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("master_timeout", "timeout")
+    def delete_component_template(self, name, params=None, headers=None):
+        """
+        Deletes a component template
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-component-template.html>`_
+
+        :arg name: The name of the template
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return self.transport.perform_request(
+            "DELETE",
+            _make_path("_component_template", name),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("local", "master_timeout")
+    def get_component_template(self, name=None, params=None, headers=None):
+        """
+        Returns one or more component templates
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-component-template.html>`_
+
+        :arg name: The comma separated names of the component templates
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_component_template", name),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("create", "master_timeout", "timeout")
+    def put_component_template(self, name, body, params=None, headers=None):
+        """
+        Creates or updates a component template
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-component-template.html>`_
+
+        :arg name: The name of the template
+        :arg body: The template definition
+        :arg create: Whether the index template should only be added if
+            new or can also replace an existing one
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        """
+        for param in (name, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_component_template", name),
+            params=params,
+            headers=headers,
+            body=body,
+        )
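+
+    # Illustrative usage (a sketch; `es` is an assumed Elasticsearch() client,
+    # the template name and settings are placeholders):
+    #
+    #     es.cluster.put_component_template(name="shared-settings", body={
+    #         "template": {"settings": {"number_of_shards": 1}}
+    #     })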
+
+    @query_params("local", "master_timeout")
+    def exists_component_template(self, name, params=None, headers=None):
+        """
+        Returns information about whether a particular component template exists
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-component-template.html>`_
+
+        :arg name: The name of the template
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return self.transport.perform_request(
+            "HEAD",
+            _make_path("_component_template", name),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("wait_for_removal")
+    def delete_voting_config_exclusions(self, params=None, headers=None):
+        """
+        Clears cluster voting config exclusions.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/voting-config-exclusions.html>`_
+
+        :arg wait_for_removal: Specifies whether to wait for all
+            excluded nodes to be removed from the cluster before clearing the voting
+            configuration exclusions list.  Default: True
+        """
+        return self.transport.perform_request(
+            "DELETE",
+            "/_cluster/voting_config_exclusions",
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("node_ids", "node_names", "timeout")
+    def post_voting_config_exclusions(self, params=None, headers=None):
+        """
+        Updates the cluster voting config exclusions by node ids or node names.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/voting-config-exclusions.html>`_
+
+        :arg node_ids: A comma-separated list of the persistent ids of
+            the nodes to exclude from the voting configuration. If specified, you
+            may not also specify ?node_names.
+        :arg node_names: A comma-separated list of the names of the
+            nodes to exclude from the voting configuration. If specified, you may
+            not also specify ?node_ids.
+        :arg timeout: Explicit operation timeout  Default: 30s
+        """
+        return self.transport.perform_request(
+            "POST", "/_cluster/voting_config_exclusions", params=params, headers=headers
+        )
diff --git a/elasticsearch_7/client/data_frame.py b/elasticsearch_7/client/data_frame.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd5f3221fb0d18eadd627fd5bb9b2bb8e2408247
--- /dev/null
+++ b/elasticsearch_7/client/data_frame.py
@@ -0,0 +1,136 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class Data_FrameClient(NamespacedClient):
+    @query_params()
+    def delete_data_frame_transform(self, transform_id, params=None, headers=None):
+        """
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.x/delete-data-frame-transform.html>`_
+
+        :arg transform_id: The id of the transform to delete
+        """
+        if transform_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for a required argument 'transform_id'."
+            )
+        return self.transport.perform_request(
+            "DELETE",
+            _make_path("_data_frame", "transforms", transform_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("from_", "size")
+    def get_data_frame_transform(self, transform_id=None, params=None, headers=None):
+        """
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.x/get-data-frame-transform.html>`_
+
+        :arg transform_id: The id or comma delimited list of id expressions of
+            the transforms to get, '_all' or '*' implies get all transforms
+        :arg from_: skips a number of transform configs, defaults to 0
+        :arg size: specifies a max number of transforms to get, defaults to 100
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_data_frame", "transforms", transform_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def get_data_frame_transform_stats(
+        self, transform_id=None, params=None, headers=None
+    ):
+        """
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.x/get-data-frame-transform-stats.html>`_
+
+        :arg transform_id: The id of the transform for which to get stats.
+            '_all' or '*' implies all transforms
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_data_frame", "transforms", transform_id, "_stats"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def preview_data_frame_transform(self, body, params=None, headers=None):
+        """
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.x/preview-data-frame-transform.html>`_
+
+        :arg body: The definition for the data_frame transform to preview
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+        return self.transport.perform_request(
+            "POST",
+            "/_data_frame/transforms/_preview",
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def put_data_frame_transform(self, transform_id, body, params=None, headers=None):
+        """
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.x/put-data-frame-transform.html>`_
+
+        :arg transform_id: The id of the new transform.
+        :arg body: The data frame transform definition
+        """
+        for param in (transform_id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_data_frame", "transforms", transform_id),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("timeout")
+    def start_data_frame_transform(self, transform_id, params=None, headers=None):
+        """
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.x/start-data-frame-transform.html>`_
+
+        :arg transform_id: The id of the transform to start
+        :arg timeout: Controls the time to wait for the transform to start
+        """
+        if transform_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for a required argument 'transform_id'."
+            )
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_data_frame", "transforms", transform_id, "_start"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("timeout", "wait_for_completion")
+    def stop_data_frame_transform(self, transform_id, params=None, headers=None):
+        """
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.x/stop-data-frame-transform.html>`_
+
+        :arg transform_id: The id of the transform to stop
+        :arg timeout: Controls the time to wait until the transform has stopped.
+            Defaults to 30 seconds
+        :arg wait_for_completion: Whether to wait for the transform to fully
+            stop before returning or not. Defaults to false
+        """
+        if transform_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for a required argument 'transform_id'."
+            )
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_data_frame", "transforms", transform_id, "_stop"),
+            params=params,
+            headers=headers,
+        )
diff --git a/elasticsearch_7/client/deprecation.py b/elasticsearch_7/client/deprecation.py
new file mode 100644
index 0000000000000000000000000000000000000000..86e1d1ed5ea0a991747eaf3abce35e685eb60e34
--- /dev/null
+++ b/elasticsearch_7/client/deprecation.py
@@ -0,0 +1,21 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path
+
+
+class DeprecationClient(NamespacedClient):
+    @query_params()
+    def info(self, index=None, params=None, headers=None):
+        """
+        `<http://www.elastic.co/guide/en/migration/7.x/migration-api-deprecation.html>`_
+
+        :arg index: Index pattern
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path(index, "_xpack", "migration", "deprecations"),
+            params=params,
+            headers=headers,
+        )
diff --git a/elasticsearch_7/client/enrich.py b/elasticsearch_7/client/enrich.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3ed996b235c24f0094b6a91296c1ed49d29aa69
--- /dev/null
+++ b/elasticsearch_7/client/enrich.py
@@ -0,0 +1,89 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class EnrichClient(NamespacedClient):
+    @query_params()
+    def delete_policy(self, name, params=None, headers=None):
+        """
+        Deletes an existing enrich policy and its enrich index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/delete-enrich-policy-api.html>`_
+
+        :arg name: The name of the enrich policy
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return self.transport.perform_request(
+            "DELETE",
+            _make_path("_enrich", "policy", name),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("wait_for_completion")
+    def execute_policy(self, name, params=None, headers=None):
+        """
+        Creates the enrich index for an existing enrich policy.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/execute-enrich-policy-api.html>`_
+
+        :arg name: The name of the enrich policy
+        :arg wait_for_completion: Whether the request should block until
+            the execution is complete.  Default: True
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_enrich", "policy", name, "_execute"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def get_policy(self, name=None, params=None, headers=None):
+        """
+        Gets information about an enrich policy.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/get-enrich-policy-api.html>`_
+
+        :arg name: A comma-separated list of enrich policy names
+        """
+        return self.transport.perform_request(
+            "GET", _make_path("_enrich", "policy", name), params=params, headers=headers
+        )
+
+    @query_params()
+    def put_policy(self, name, body, params=None, headers=None):
+        """
+        Creates a new enrich policy.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/put-enrich-policy-api.html>`_
+
+        :arg name: The name of the enrich policy
+        :arg body: The enrich policy to register
+        """
+        for param in (name, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_enrich", "policy", name),
+            params=params,
+            headers=headers,
+            body=body,
+        )
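+
+    # Illustrative usage (a sketch; `es` is an assumed Elasticsearch() client,
+    # index and field names are placeholders):
+    #
+    #     es.enrich.put_policy(name="users-policy", body={
+    #         "match": {
+    #             "indices": "users",
+    #             "match_field": "email",
+    #             "enrich_fields": ["first_name", "last_name"],
+    #         }
+    #     })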
+
+    @query_params()
+    def stats(self, params=None, headers=None):
+        """
+        Gets enrich coordinator statistics and information about enrich policies that
+        are currently executing.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/enrich-stats-api.html>`_
+        """
+        return self.transport.perform_request(
+            "GET", "/_enrich/_stats", params=params, headers=headers
+        )
diff --git a/elasticsearch_7/client/eql.py b/elasticsearch_7/client/eql.py
new file mode 100644
index 0000000000000000000000000000000000000000..56f122f4b0fc89f5a382afb10e329ba56719bd05
--- /dev/null
+++ b/elasticsearch_7/client/eql.py
@@ -0,0 +1,29 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, SKIP_IN_PATH, query_params, _make_path
+
+
+class EqlClient(NamespacedClient):
+    @query_params()
+    def search(self, index, body, params=None, headers=None):
+        """
+        Returns results matching a query expressed in Event Query Language (EQL)
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.html>`_
+
+        :arg index: The name of the index to scope the operation
+        :arg body: Eql request body. Use the `query` to limit the query
+            scope.
+        """
+        for param in (index, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path(index, "_eql", "search"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
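+
+    # Illustrative usage (a sketch; `es` is an assumed Elasticsearch() client,
+    # the index name and EQL query are placeholders):
+    #
+    #     es.eql.search(index="my-logs", body={
+    #         "query": 'process where process.name == "regsvr32.exe"'
+    #     })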
diff --git a/elasticsearch_7/client/graph.py b/elasticsearch_7/client/graph.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ce2547e954e2c2d4aa3d49c2b865d86746dccd9
--- /dev/null
+++ b/elasticsearch_7/client/graph.py
@@ -0,0 +1,33 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class GraphClient(NamespacedClient):
+    @query_params("routing", "timeout")
+    def explore(self, index, body=None, doc_type=None, params=None, headers=None):
+        """
+        Explore extracted and summarized information about the documents and terms in
+        an index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/graph-explore-api.html>`_
+
+        :arg index: A comma-separated list of index names to search; use
+            `_all` or empty string to perform the operation on all indices
+        :arg body: Graph Query DSL
+        :arg doc_type: A comma-separated list of document types to
+            search; leave empty to perform the operation on all types
+        :arg routing: Specific routing value
+        :arg timeout: Explicit operation timeout
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path(index, doc_type, "_graph", "explore"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
diff --git a/elasticsearch_7/client/ilm.py b/elasticsearch_7/client/ilm.py
new file mode 100644
index 0000000000000000000000000000000000000000..8c52def5afa781aac42aa95c11510e9674961d39
--- /dev/null
+++ b/elasticsearch_7/client/ilm.py
@@ -0,0 +1,162 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class IlmClient(NamespacedClient):
+    @query_params()
+    def delete_lifecycle(self, policy, params=None, headers=None):
+        """
+        Deletes the specified lifecycle policy definition. A currently used policy
+        cannot be deleted.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ilm-delete-lifecycle.html>`_
+
+        :arg policy: The name of the index lifecycle policy
+        """
+        if policy in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'policy'.")
+
+        return self.transport.perform_request(
+            "DELETE",
+            _make_path("_ilm", "policy", policy),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("only_errors", "only_managed")
+    def explain_lifecycle(self, index, params=None, headers=None):
+        """
+        Retrieves information about the index's current lifecycle state, such as the
+        currently executing phase, action, and step.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ilm-explain-lifecycle.html>`_
+
+        :arg index: The name of the index to explain
+        :arg only_errors: filters the indices included in the response
+            to ones in an ILM error state, implies only_managed
+        :arg only_managed: filters the indices included in the response
+            to ones managed by ILM
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return self.transport.perform_request(
+            "GET", _make_path(index, "_ilm", "explain"), params=params, headers=headers
+        )
+
+    @query_params()
+    def get_lifecycle(self, policy=None, params=None, headers=None):
+        """
+        Returns the specified policy definition. Includes the policy version and last
+        modified date.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ilm-get-lifecycle.html>`_
+
+        :arg policy: The name of the index lifecycle policy
+        """
+        return self.transport.perform_request(
+            "GET", _make_path("_ilm", "policy", policy), params=params, headers=headers
+        )
+
+    @query_params()
+    def get_status(self, params=None, headers=None):
+        """
+        Retrieves the current index lifecycle management (ILM) status.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ilm-get-status.html>`_
+        """
+        return self.transport.perform_request(
+            "GET", "/_ilm/status", params=params, headers=headers
+        )
+
+    @query_params()
+    def move_to_step(self, index, body=None, params=None, headers=None):
+        """
+        Manually moves an index into the specified step and executes that step.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ilm-move-to-step.html>`_
+
+        :arg index: The name of the index whose lifecycle step is to
+            change
+        :arg body: The new lifecycle step to move to
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_ilm", "move", index),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def put_lifecycle(self, policy, body=None, params=None, headers=None):
+        """
+        Creates a lifecycle policy
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ilm-put-lifecycle.html>`_
+
+        :arg policy: The name of the index lifecycle policy
+        :arg body: The lifecycle policy definition to register
+        """
+        if policy in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'policy'.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_ilm", "policy", policy),
+            params=params,
+            headers=headers,
+            body=body,
+        )
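+
+    # Illustrative usage (a sketch; `es` is an assumed Elasticsearch() client,
+    # the policy name and thresholds are placeholders):
+    #
+    #     es.ilm.put_lifecycle(policy="logs-policy", body={
+    #         "policy": {"phases": {
+    #             "hot": {"actions": {"rollover": {"max_size": "50gb"}}},
+    #             "delete": {"min_age": "30d", "actions": {"delete": {}}},
+    #         }}
+    #     })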
+
+    @query_params()
+    def remove_policy(self, index, params=None, headers=None):
+        """
+        Removes the assigned lifecycle policy and stops managing the specified index
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ilm-remove-policy.html>`_
+
+        :arg index: The name of the index to remove policy on
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return self.transport.perform_request(
+            "POST", _make_path(index, "_ilm", "remove"), params=params, headers=headers
+        )
+
+    @query_params()
+    def retry(self, index, params=None, headers=None):
+        """
+        Retries executing the policy for an index that is in the ERROR step.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ilm-retry-policy.html>`_
+
+        :arg index: The name of the indices (comma-separated) whose
+            failed lifecycle step is to be retried
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return self.transport.perform_request(
+            "POST", _make_path(index, "_ilm", "retry"), params=params, headers=headers
+        )
+
+    @query_params()
+    def start(self, params=None, headers=None):
+        """
+        Start the index lifecycle management (ILM) plugin.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ilm-start.html>`_
+        """
+        return self.transport.perform_request(
+            "POST", "/_ilm/start", params=params, headers=headers
+        )
+
+    @query_params()
+    def stop(self, params=None, headers=None):
+        """
+        Halts all lifecycle management operations and stops the index lifecycle
+        management (ILM) plugin
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ilm-stop.html>`_
+        """
+        return self.transport.perform_request(
+            "POST", "/_ilm/stop", params=params, headers=headers
+        )
diff --git a/elasticsearch_7/client/indices.py b/elasticsearch_7/client/indices.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe0e8883df38dda4671adc9fc6c87fb67ef026ff
--- /dev/null
+++ b/elasticsearch_7/client/indices.py
@@ -0,0 +1,1435 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class IndicesClient(NamespacedClient):
+    @query_params()
+    def analyze(self, body=None, index=None, params=None, headers=None):
+        """
+        Performs the analysis process on a text and returns the token breakdown of
+        the text.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-analyze.html>`_
+
+        :arg body: Define analyzer/tokenizer parameters and the text on
+            which the analysis should be performed
+        :arg index: The name of the index to scope the operation
+        """
+        return self.transport.perform_request(
+            "POST",
+            _make_path(index, "_analyze"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable")
+    def refresh(self, index=None, params=None, headers=None):
+        """
+        Performs the refresh operation on one or more indices.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-refresh.html>`_
+
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string to perform the operation on all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        """
+        return self.transport.perform_request(
+            "POST", _make_path(index, "_refresh"), params=params, headers=headers
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "force",
+        "ignore_unavailable",
+        "wait_if_ongoing",
+    )
+    def flush(self, index=None, params=None, headers=None):
+        """
+        Performs the flush operation on one or more indices.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-flush.html>`_
+
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string for all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg force: Whether a flush should be forced even if it is not
+            necessarily needed, i.e. if no changes will be committed to the index.
+            This is useful if transaction log IDs should be incremented even if no
+            uncommitted changes are present. (This setting can be considered
+            internal)
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg wait_if_ongoing: If set to true the flush operation will
+            block until the flush can be executed if another flush operation is
+            already executing. The default is true. If set to false the flush will
+            be skipped if another flush operation is already running.
+        """
+        return self.transport.perform_request(
+            "POST", _make_path(index, "_flush"), params=params, headers=headers
+        )
+
+    @query_params(
+        "include_type_name", "master_timeout", "timeout", "wait_for_active_shards"
+    )
+    def create(self, index, body=None, params=None, headers=None):
+        """
+        Creates an index with optional settings and mappings.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-create-index.html>`_
+
+        :arg index: The name of the index
+        :arg body: The configuration for the index (`settings` and
+            `mappings`)
+        :arg include_type_name: Whether a type should be expected in the
+            body of the mappings.
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_active_shards: Set the number of active shards to
+            wait for before the operation returns.
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return self.transport.perform_request(
+            "PUT", _make_path(index), params=params, headers=headers, body=body
+        )
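+
+    # Illustrative usage (a sketch; `es` is an assumed Elasticsearch() client,
+    # the index name and mapping are placeholders):
+    #
+    #     es.indices.create(index="my-index", body={
+    #         "settings": {"number_of_shards": 1},
+    #         "mappings": {"properties": {"title": {"type": "text"}}},
+    #     })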
+
+    @query_params("master_timeout", "timeout", "wait_for_active_shards")
+    def clone(self, index, target, body=None, params=None, headers=None):
+        """
+        Clones an index
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-clone-index.html>`_
+
+        :arg index: The name of the source index to clone
+        :arg target: The name of the target index to clone into
+        :arg body: The configuration for the target index (`settings`
+            and `aliases`)
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_active_shards: Set the number of active shards to
+            wait for on the cloned index before the operation returns.
+        """
+        for param in (index, target):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path(index, "_clone", target),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "flat_settings",
+        "ignore_unavailable",
+        "include_defaults",
+        "include_type_name",
+        "local",
+        "master_timeout",
+    )
+    def get(self, index, params=None, headers=None):
+        """
+        Returns information about one or more indices.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-get-index.html>`_
+
+        :arg index: A comma-separated list of index names
+        :arg allow_no_indices: Ignore if a wildcard expression resolves
+            to no concrete indices (default: false)
+        :arg expand_wildcards: Whether wildcard expressions should get
+            expanded to open or closed indices (default: open)  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg flat_settings: Return settings in flat format (default:
+            false)
+        :arg ignore_unavailable: Ignore unavailable indexes (default:
+            false)
+        :arg include_defaults: Whether to return all default settings for
+            each of the indices.
+        :arg include_type_name: Whether to add the type name to the
+            response (default: false)
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Specify timeout for connection to master
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return self.transport.perform_request(
+            "GET", _make_path(index), params=params, headers=headers
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "ignore_unavailable",
+        "master_timeout",
+        "timeout",
+        "wait_for_active_shards",
+    )
+    def open(self, index, params=None, headers=None):
+        """
+        Opens an index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-open-close.html>`_
+
+        :arg index: A comma separated list of indices to open
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: closed
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_active_shards: Sets the number of active shards to
+            wait for before the operation returns.
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return self.transport.perform_request(
+            "POST", _make_path(index, "_open"), params=params, headers=headers
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "ignore_unavailable",
+        "master_timeout",
+        "timeout",
+        "wait_for_active_shards",
+    )
+    def close(self, index, params=None, headers=None):
+        """
+        Closes an index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-open-close.html>`_
+
+        :arg index: A comma separated list of indices to close
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_active_shards: Sets the number of active shards to
+            wait for before the operation returns.
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return self.transport.perform_request(
+            "POST", _make_path(index, "_close"), params=params, headers=headers
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "ignore_unavailable",
+        "master_timeout",
+        "timeout",
+    )
+    def delete(self, index, params=None, headers=None):
+        """
+        Deletes an index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-delete-index.html>`_
+
+        :arg index: A comma-separated list of indices to delete; use
+            `_all` or `*` string to delete all indices
+        :arg allow_no_indices: Ignore if a wildcard expression resolves
+            to no concrete indices (default: false)
+        :arg expand_wildcards: Whether wildcard expressions should get
+            expanded to open or closed indices (default: open)  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg ignore_unavailable: Ignore unavailable indexes (default:
+            false)
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return self.transport.perform_request(
+            "DELETE", _make_path(index), params=params, headers=headers
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "flat_settings",
+        "ignore_unavailable",
+        "include_defaults",
+        "local",
+    )
+    def exists(self, index, params=None, headers=None):
+        """
+        Returns information about whether a particular index exists.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-exists.html>`_
+
+        :arg index: A comma-separated list of index names
+        :arg allow_no_indices: Ignore if a wildcard expression resolves
+            to no concrete indices (default: false)
+        :arg expand_wildcards: Whether wildcard expressions should get
+            expanded to open or closed indices (default: open)  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg flat_settings: Return settings in flat format (default:
+            false)
+        :arg ignore_unavailable: Ignore unavailable indexes (default:
+            false)
+        :arg include_defaults: Whether to return all default settings for
+            each of the indices.
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return self.transport.perform_request(
+            "HEAD", _make_path(index), params=params, headers=headers
+        )
+
+    @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable", "local")
+    def exists_type(self, index, doc_type, params=None, headers=None):
+        """
+        Returns information about whether a particular document type exists.
+        (DEPRECATED)
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-types-exists.html>`_
+
+        :arg index: A comma-separated list of index names; use `_all` to
+            check the types across all indices
+        :arg doc_type: A comma-separated list of document types to check
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        """
+        for param in (index, doc_type):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "HEAD",
+            _make_path(index, "_mapping", doc_type),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "ignore_unavailable",
+        "include_type_name",
+        "master_timeout",
+        "timeout",
+    )
+    def put_mapping(self, body, index=None, doc_type=None, params=None, headers=None):
+        """
+        Updates the index mappings.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-put-mapping.html>`_
+
+        :arg body: The mapping definition
+        :arg index: A comma-separated list of index names the mapping
+            should be added to (supports wildcards); use `_all` or omit to add the
+            mapping on all indices.
+        :arg doc_type: The name of the document type
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg include_type_name: Whether a type should be expected in the
+            body of the mappings.
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        if doc_type not in SKIP_IN_PATH and index in SKIP_IN_PATH:
+            index = "_all"
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path(index, doc_type, "_mapping"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
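+
+    # Illustrative usage (a sketch; `es` is an assumed Elasticsearch() client,
+    # the index and field names are placeholders):
+    #
+    #     es.indices.put_mapping(index="my-index", body={
+    #         "properties": {"user_id": {"type": "keyword"}}
+    #     })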
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "ignore_unavailable",
+        "include_type_name",
+        "local",
+        "master_timeout",
+    )
+    def get_mapping(self, index=None, doc_type=None, params=None, headers=None):
+        """
+        Returns mappings for one or more indices.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-get-mapping.html>`_
+
+        :arg index: A comma-separated list of index names
+        :arg doc_type: A comma-separated list of document types
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg include_type_name: Whether to add the type name to the
+            response (default: false)
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Specify timeout for connection to master
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path(index, "_mapping", doc_type),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("master_timeout", "timeout")
+    def put_alias(self, index, name, body=None, params=None, headers=None):
+        """
+        Creates or updates an alias.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-aliases.html>`_
+
+        :arg index: A comma-separated list of index names the alias
+            should point to (supports wildcards); use `_all` to perform the
+            operation on all indices.
+        :arg name: The name of the alias to be created or updated
+        :arg body: The settings for the alias, such as `routing` or
+            `filter`
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit timestamp for the document
+        """
+        for param in (index, name):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path(index, "_alias", name),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable", "local")
+    def exists_alias(self, name, index=None, params=None, headers=None):
+        """
+        Returns information about whether a particular alias exists.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-aliases.html>`_
+
+        :arg name: A comma-separated list of alias names to return
+        :arg index: A comma-separated list of index names to filter
+            aliases
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: all
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return self.transport.perform_request(
+            "HEAD", _make_path(index, "_alias", name), params=params, headers=headers
+        )
+
+    @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable", "local")
+    def get_alias(self, index=None, name=None, params=None, headers=None):
+        """
+        Returns an alias.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-aliases.html>`_
+
+        :arg index: A comma-separated list of index names to filter
+            aliases
+        :arg name: A comma-separated list of alias names to return
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: all
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        """
+        return self.transport.perform_request(
+            "GET", _make_path(index, "_alias", name), params=params, headers=headers
+        )
+
+    @query_params("master_timeout", "timeout")
+    def update_aliases(self, body, params=None, headers=None):
+        """
+        Updates index aliases.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-aliases.html>`_
+
+        :arg body: The definition of `actions` to perform
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Request timeout
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return self.transport.perform_request(
+            "POST", "/_aliases", params=params, headers=headers, body=body
+        )
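
For readers unfamiliar with the `_aliases` actions format this wrapper forwards, here is a minimal usage sketch (the index and alias names and the localhost URL are illustrative only) that atomically moves an alias from one backing index to another:

from elasticsearch_7 import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster
# Remove and add in one request so the alias never points at zero indices.
es.indices.update_aliases(
    body={
        "actions": [
            {"remove": {"index": "logs-000001", "alias": "logs"}},
            {"add": {"index": "logs-000002", "alias": "logs"}},
        ]
    }
)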
+
+    @query_params("master_timeout", "timeout")
+    def delete_alias(self, index, name, params=None, headers=None):
+        """
+        Deletes an alias.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-aliases.html>`_
+
+        :arg index: A comma-separated list of index names (supports
+            wildcards); use `_all` for all indices
+        :arg name: A comma-separated list of aliases to delete (supports
+            wildcards); use `_all` to delete all aliases for the specified indices.
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        """
+        for param in (index, name):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "DELETE", _make_path(index, "_alias", name), params=params, headers=headers
+        )
+
+    @query_params("create", "include_type_name", "master_timeout", "order")
+    def put_template(self, name, body, params=None, headers=None):
+        """
+        Creates or updates an index template.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-templates.html>`_
+
+        :arg name: The name of the template
+        :arg body: The template definition
+        :arg create: Whether the index template should only be added if
+            new or can also replace an existing one
+        :arg include_type_name: Whether a type should be returned in the
+            body of the mappings.
+        :arg master_timeout: Specify timeout for connection to master
+        :arg order: The order for this template when merging multiple
+            matching ones (higher numbers are merged later, overriding the lower
+            numbers)
+        """
+        for param in (name, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_template", name),
+            params=params,
+            headers=headers,
+            body=body,
+        )
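
As a rough sketch of the legacy template body accepted by `put_template` (the template name, index pattern, and settings are made up), one might write:

from elasticsearch_7 import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster
es.indices.put_template(
    name="logs-template",
    body={
        "index_patterns": ["logs-*"],
        "settings": {"number_of_shards": 1},
        "mappings": {"properties": {"@timestamp": {"type": "date"}}},
    },
)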
+
+    @query_params("flat_settings", "local", "master_timeout")
+    def exists_template(self, name, params=None, headers=None):
+        """
+        Returns information about whether a particular index template exists.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-templates.html>`_
+
+        :arg name: The comma-separated names of the index templates
+        :arg flat_settings: Return settings in flat format (default:
+            false)
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return self.transport.perform_request(
+            "HEAD", _make_path("_template", name), params=params, headers=headers
+        )
+
+    @query_params("flat_settings", "include_type_name", "local", "master_timeout")
+    def get_template(self, name=None, params=None, headers=None):
+        """
+        Returns an index template.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-templates.html>`_
+
+        :arg name: The comma-separated names of the index templates
+        :arg flat_settings: Return settings in flat format (default:
+            false)
+        :arg include_type_name: Whether a type should be returned in the
+            body of the mappings.
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        """
+        return self.transport.perform_request(
+            "GET", _make_path("_template", name), params=params, headers=headers
+        )
+
+    @query_params("master_timeout", "timeout")
+    def delete_template(self, name, params=None, headers=None):
+        """
+        Deletes an index template.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-templates.html>`_
+
+        :arg name: The name of the template
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return self.transport.perform_request(
+            "DELETE", _make_path("_template", name), params=params, headers=headers
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "flat_settings",
+        "ignore_unavailable",
+        "include_defaults",
+        "local",
+        "master_timeout",
+    )
+    def get_settings(self, index=None, name=None, params=None, headers=None):
+        """
+        Returns settings for one or more indices.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-get-settings.html>`_
+
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string to perform the operation on all indices
+        :arg name: The name of the settings that should be included
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: all
+        :arg flat_settings: Return settings in flat format (default:
+            false)
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg include_defaults: Whether to return all default setting for
+            each of the indices.
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Specify timeout for connection to master
+        """
+        return self.transport.perform_request(
+            "GET", _make_path(index, "_settings", name), params=params, headers=headers
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "flat_settings",
+        "ignore_unavailable",
+        "master_timeout",
+        "preserve_existing",
+        "timeout",
+    )
+    def put_settings(self, body, index=None, params=None, headers=None):
+        """
+        Updates the index settings.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-update-settings.html>`_
+
+        :arg body: The index settings to be updated
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string to perform the operation on all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg flat_settings: Return settings in flat format (default:
+            false)
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg master_timeout: Specify timeout for connection to master
+        :arg preserve_existing: Whether to update existing settings. If
+            set to `true` existing settings on an index remain unchanged, the
+            default is `false`
+        :arg timeout: Explicit operation timeout
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path(index, "_settings"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
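
A minimal sketch of a dynamic settings update through `put_settings` (the index pattern and replica count are illustrative):

from elasticsearch_7 import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster
# Raise the replica count on every index matching the pattern.
es.indices.put_settings(
    body={"index": {"number_of_replicas": 2}},
    index="logs-*",
)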
+
+    @query_params(
+        "completion_fields",
+        "expand_wildcards",
+        "fielddata_fields",
+        "fields",
+        "forbid_closed_indices",
+        "groups",
+        "include_segment_file_sizes",
+        "include_unloaded_segments",
+        "level",
+        "types",
+    )
+    def stats(self, index=None, metric=None, params=None, headers=None):
+        """
+        Provides statistics on operations happening in an index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-stats.html>`_
+
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string to perform the operation on all indices
+        :arg metric: Limit the information returned to the specific
+            metrics.  Valid choices: _all, completion, docs, fielddata, query_cache,
+            flush, get, indexing, merge, request_cache, refresh, search, segments,
+            store, warmer, suggest
+        :arg completion_fields: A comma-separated list of fields for
+            `fielddata` and `suggest` index metric (supports wildcards)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg fielddata_fields: A comma-separated list of fields for
+            `fielddata` index metric (supports wildcards)
+        :arg fields: A comma-separated list of fields for `fielddata`
+            and `completion` index metric (supports wildcards)
+        :arg forbid_closed_indices: If set to false, stats will also be
+            collected from closed indices if explicitly specified or if
+            expand_wildcards expands to closed indices  Default: True
+        :arg groups: A comma-separated list of search groups for
+            `search` index metric
+        :arg include_segment_file_sizes: Whether to report the
+            aggregated disk usage of each one of the Lucene index files (only
+            applies if segment stats are requested)
+        :arg include_unloaded_segments: If set to true segment stats
+            will include stats for segments that are not currently loaded into
+            memory
+        :arg level: Return stats aggregated at cluster, index or shard
+            level  Valid choices: cluster, indices, shards  Default: indices
+        :arg types: A comma-separated list of document types for the
+            `indexing` index metric
+        """
+        return self.transport.perform_request(
+            "GET", _make_path(index, "_stats", metric), params=params, headers=headers
+        )
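
For example, the stats call can be restricted to a couple of metrics with shard-level aggregation (the index pattern is illustrative); `level` is forwarded as a query parameter by the decorator:

from elasticsearch_7 import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster
resp = es.indices.stats(index="logs-*", metric="docs,store", level="shards")
print(resp["_all"]["total"]["docs"]["count"])  # total document count across matched indices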
+
+    @query_params(
+        "allow_no_indices", "expand_wildcards", "ignore_unavailable", "verbose"
+    )
+    def segments(self, index=None, params=None, headers=None):
+        """
+        Provides low-level information about segments in a Lucene index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-segments.html>`_
+
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string to perform the operation on all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg verbose: Includes detailed memory usage by Lucene.
+        """
+        return self.transport.perform_request(
+            "GET", _make_path(index, "_segments"), params=params, headers=headers
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "fielddata",
+        "fields",
+        "ignore_unavailable",
+        "query",
+        "request",
+    )
+    def clear_cache(self, index=None, params=None, headers=None):
+        """
+        Clears all or specific caches for one or more indices.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-clearcache.html>`_
+
+        :arg index: A comma-separated list of index names to limit the
+            operation
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg fielddata: Clear field data
+        :arg fields: A comma-separated list of fields to clear when
+            using the `fielddata` parameter (default: all)
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg query: Clear query caches
+        :arg request: Clear request cache
+        """
+        return self.transport.perform_request(
+            "POST", _make_path(index, "_cache", "clear"), params=params, headers=headers
+        )
+
+    @query_params("active_only", "detailed")
+    def recovery(self, index=None, params=None, headers=None):
+        """
+        Returns information about ongoing index shard recoveries.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-recovery.html>`_
+
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string to perform the operation on all indices
+        :arg active_only: Display only those recoveries that are
+            currently on-going
+        :arg detailed: Whether to display detailed information about
+            shard recovery
+        """
+        return self.transport.perform_request(
+            "GET", _make_path(index, "_recovery"), params=params, headers=headers
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "ignore_unavailable",
+        "only_ancient_segments",
+        "wait_for_completion",
+    )
+    def upgrade(self, index=None, params=None, headers=None):
+        """
+        The _upgrade API is no longer useful and will be removed.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-upgrade.html>`_
+
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string to perform the operation on all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg only_ancient_segments: If true, only ancient (an older
+            Lucene major release) segments will be upgraded
+        :arg wait_for_completion: Specify whether the request should
+            block until all segments are upgraded (default: false)
+        """
+        return self.transport.perform_request(
+            "POST", _make_path(index, "_upgrade"), params=params, headers=headers
+        )
+
+    @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable")
+    def get_upgrade(self, index=None, params=None, headers=None):
+        """
+        The _upgrade API is no longer useful and will be removed.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-upgrade.html>`_
+
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string to perform the operation on all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        """
+        return self.transport.perform_request(
+            "GET", _make_path(index, "_upgrade"), params=params, headers=headers
+        )
+
+    @query_params(
+        "allow_no_indices", "expand_wildcards", "ignore_unavailable", "status"
+    )
+    def shard_stores(self, index=None, params=None, headers=None):
+        """
+        Provides store information for shard copies of indices.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-shards-stores.html>`_
+
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string to perform the operation on all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg status: A comma-separated list of statuses used to filter
+            on shards to get store information for  Valid choices: green, yellow,
+            red, all
+        """
+        return self.transport.perform_request(
+            "GET", _make_path(index, "_shard_stores"), params=params, headers=headers
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "flush",
+        "ignore_unavailable",
+        "max_num_segments",
+        "only_expunge_deletes",
+    )
+    def forcemerge(self, index=None, params=None, headers=None):
+        """
+        Performs the force merge operation on one or more indices.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-forcemerge.html>`_
+
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string to perform the operation on all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg flush: Specify whether the index should be flushed after
+            performing the operation (default: true)
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg max_num_segments: The number of segments the index should
+            be merged into (default: dynamic)
+        :arg only_expunge_deletes: Specify whether the operation should
+            only expunge deleted documents
+        """
+        return self.transport.perform_request(
+            "POST", _make_path(index, "_forcemerge"), params=params, headers=headers
+        )
+
+    @query_params(
+        "copy_settings", "master_timeout", "timeout", "wait_for_active_shards"
+    )
+    def shrink(self, index, target, body=None, params=None, headers=None):
+        """
+        Allows you to shrink an existing index into a new index with fewer primary shards.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-shrink-index.html>`_
+
+        :arg index: The name of the source index to shrink
+        :arg target: The name of the target index to shrink into
+        :arg body: The configuration for the target index (`settings`
+            and `aliases`)
+        :arg copy_settings: whether or not to copy settings from the
+            source index (defaults to false)
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_active_shards: Set the number of active shards to
+            wait for on the shrunken index before the operation returns.
+        """
+        for param in (index, target):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path(index, "_shrink", target),
+            params=params,
+            headers=headers,
+            body=body,
+        )
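
A usage sketch for `shrink`, assuming the source index has already been made read-only and relocated to a single node as the shrink API requires (index names are illustrative):

from elasticsearch_7 import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster
es.indices.shrink(
    index="logs-000001",
    target="logs-000001-shrunk",
    body={"settings": {"index.number_of_shards": 1}},
)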
+
+    @query_params(
+        "copy_settings", "master_timeout", "timeout", "wait_for_active_shards"
+    )
+    def split(self, index, target, body=None, params=None, headers=None):
+        """
+        Allows you to split an existing index into a new index with more primary
+        shards.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-split-index.html>`_
+
+        :arg index: The name of the source index to split
+        :arg target: The name of the target index to split into
+        :arg body: The configuration for the target index (`settings`
+            and `aliases`)
+        :arg copy_settings: whether or not to copy settings from the
+            source index (defaults to false)
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_active_shards: Set the number of active shards to
+            wait for on the split index before the operation returns.
+        """
+        for param in (index, target):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path(index, "_split", target),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params(
+        "dry_run",
+        "include_type_name",
+        "master_timeout",
+        "timeout",
+        "wait_for_active_shards",
+    )
+    def rollover(self, alias, body=None, new_index=None, params=None, headers=None):
+        """
+        Updates an alias to point to a new index when the existing index is considered
+        to be too large or too old.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-rollover-index.html>`_
+
+        :arg alias: The name of the alias to rollover
+        :arg body: The conditions that need to be met for executing
+            rollover
+        :arg new_index: The name of the rollover index
+        :arg dry_run: If set to true the rollover action will only be
+            validated but not actually performed even if a condition matches. The
+            default is false
+        :arg include_type_name: Whether a type should be included in the
+            body of the mappings.
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_active_shards: Set the number of active shards to
+            wait for on the newly created rollover index before the operation
+            returns.
+        """
+        if alias in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'alias'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path(alias, "_rollover", new_index),
+            params=params,
+            headers=headers,
+            body=body,
+        )
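
A hypothetical rollover call, with a made-up write alias and thresholds, that only rolls over once one of the conditions in the body is met:

from elasticsearch_7 import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster
es.indices.rollover(
    alias="logs-write",
    body={"conditions": {"max_age": "1d", "max_size": "50gb"}},
)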
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "ignore_unavailable",
+        "master_timeout",
+        "timeout",
+        "wait_for_active_shards",
+    )
+    def freeze(self, index, params=None, headers=None):
+        """
+        Freezes an index. A frozen index has almost no overhead on the cluster (except
+        for maintaining its metadata in memory) and is read-only.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/freeze-index-api.html>`_
+
+        :arg index: The name of the index to freeze
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: closed
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_active_shards: Sets the number of active shards to
+            wait for before the operation returns.
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return self.transport.perform_request(
+            "POST", _make_path(index, "_freeze"), params=params, headers=headers
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "ignore_unavailable",
+        "master_timeout",
+        "timeout",
+        "wait_for_active_shards",
+    )
+    def unfreeze(self, index, params=None, headers=None):
+        """
+        Unfreezes an index. When a frozen index is unfrozen, the index goes through the
+        normal recovery process and becomes writeable again.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/unfreeze-index-api.html>`_
+
+        :arg index: The name of the index to unfreeze
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: closed
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_active_shards: Sets the number of active shards to
+            wait for before the operation returns.
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return self.transport.perform_request(
+            "POST", _make_path(index, "_unfreeze"), params=params, headers=headers
+        )
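
A sketch of the freeze/unfreeze pair (the index name is illustrative): freeze an old index to shed its in-memory overhead, then unfreeze it later when it needs to accept writes again:

from elasticsearch_7 import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster
es.indices.freeze(index="logs-2019.01")
# ...later, before writing to it again:
es.indices.unfreeze(index="logs-2019.01")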
+
+    @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable")
+    def reload_search_analyzers(self, index, params=None, headers=None):
+        """
+        Reloads an index's search analyzers and their resources.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-reload-analyzers.html>`_
+
+        :arg index: A comma-separated list of index names to reload
+            analyzers for
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return self.transport.perform_request(
+            "GET",
+            _make_path(index, "_reload_search_analyzers"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "ignore_unavailable",
+        "include_defaults",
+        "include_type_name",
+        "local",
+    )
+    def get_field_mapping(
+        self, fields, index=None, doc_type=None, params=None, headers=None
+    ):
+        """
+        Returns mapping for one or more fields.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-get-field-mapping.html>`_
+
+        :arg fields: A comma-separated list of fields
+        :arg index: A comma-separated list of index names
+        :arg doc_type: A comma-separated list of document types
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg include_defaults: Whether the default mapping values should
+            be returned as well
+        :arg include_type_name: Whether a type should be returned in the
+            body of the mappings.
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        """
+        if fields in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'fields'.")
+
+        return self.transport.perform_request(
+            "GET",
+            _make_path(index, "_mapping", doc_type, "field", fields),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "all_shards",
+        "allow_no_indices",
+        "analyze_wildcard",
+        "analyzer",
+        "default_operator",
+        "df",
+        "expand_wildcards",
+        "explain",
+        "ignore_unavailable",
+        "lenient",
+        "q",
+        "rewrite",
+    )
+    def validate_query(
+        self, body=None, index=None, doc_type=None, params=None, headers=None
+    ):
+        """
+        Allows a user to validate a potentially expensive query without executing it.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/search-validate.html>`_
+
+        :arg body: The query definition specified with the Query DSL
+        :arg index: A comma-separated list of index names to restrict
+            the operation; use `_all` or empty string to perform the operation on
+            all indices
+        :arg doc_type: A comma-separated list of document types to
+            restrict the operation; leave empty to perform the operation on all
+            types
+        :arg all_shards: Execute validation on all shards instead of one
+            random shard per index
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg analyze_wildcard: Specify whether wildcard and prefix
+            queries should be analyzed (default: false)
+        :arg analyzer: The analyzer to use for the query string
+        :arg default_operator: The default operator for query string
+            query (AND or OR)  Valid choices: AND, OR  Default: OR
+        :arg df: The field to use as default where no field prefix is
+            given in the query string
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, hidden, none, all  Default: open
+        :arg explain: Return detailed information about the error
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg lenient: Specify whether format-based query failures (such
+            as providing text to a numeric field) should be ignored
+        :arg q: Query in the Lucene query string syntax
+        :arg rewrite: Provide a more detailed explanation showing the
+            actual Lucene query that will be executed.
+        """
+        return self.transport.perform_request(
+            "POST",
+            _make_path(index, doc_type, "_validate", "query"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
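
For example, a potentially expensive query can be checked for validity without running it; `explain` and `rewrite` are query parameters handled by the decorator (the index pattern and query are illustrative):

from elasticsearch_7 import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster
resp = es.indices.validate_query(
    index="logs-*",
    body={"query": {"range": {"@timestamp": {"gte": "now-1d"}}}},
    explain=True,
    rewrite=True,
)
print(resp["valid"])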
+
+    @query_params()
+    def create_data_stream(self, name, body, params=None, headers=None):
+        """
+        Creates or updates a data stream.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/data-streams.html>`_
+
+        :arg name: The name of the data stream
+        :arg body: The data stream definition
+        """
+        for param in (name, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_data_stream", name),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def delete_data_stream(self, name, params=None, headers=None):
+        """
+        Deletes a data stream.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/data-streams.html>`_
+
+        :arg name: The name of the data stream
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return self.transport.perform_request(
+            "DELETE", _make_path("_data_stream", name), params=params, headers=headers
+        )
+
+    @query_params()
+    def get_data_streams(self, name=None, params=None, headers=None):
+        """
+        Returns data streams.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/data-streams.html>`_
+
+        :arg name: The name or wildcard expression of the requested data
+            streams
+        """
+        return self.transport.perform_request(
+            "GET", _make_path("_data_streams", name), params=params, headers=headers
+        )
+
+    @query_params("master_timeout", "timeout")
+    def delete_index_template(self, name, params=None, headers=None):
+        """
+        Deletes an index template.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-templates.html>`_
+
+        :arg name: The name of the template
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return self.transport.perform_request(
+            "DELETE",
+            _make_path("_index_template", name),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("flat_settings", "local", "master_timeout")
+    def get_index_template(self, name=None, params=None, headers=None):
+        """
+        Returns an index template.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-templates.html>`_
+
+        :arg name: The comma-separated names of the index templates
+        :arg flat_settings: Return settings in flat format (default:
+            false)
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        """
+        return self.transport.perform_request(
+            "GET", _make_path("_index_template", name), params=params, headers=headers
+        )
+
+    @query_params("cause", "create", "master_timeout")
+    def put_index_template(self, name, body, params=None, headers=None):
+        """
+        Creates or updates an index template.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-templates.html>`_
+
+        :arg name: The name of the template
+        :arg body: The template definition
+        :arg cause: User defined reason for creating/updating the index
+            template
+        :arg create: Whether the index template should only be added if
+            new or can also replace an existing one
+        :arg master_timeout: Specify timeout for connection to master
+        """
+        for param in (name, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_index_template", name),
+            params=params,
+            headers=headers,
+            body=body,
+        )
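
A sketch of a composable template body for this newer `_index_template` endpoint (the template name, pattern, and settings are made up):

from elasticsearch_7 import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster
es.indices.put_index_template(
    name="logs",
    body={
        "index_patterns": ["logs-*"],
        "template": {"settings": {"number_of_shards": 1}},
        "priority": 10,
    },
)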
+
+    @query_params("flat_settings", "local", "master_timeout")
+    def exists_index_template(self, name, params=None, headers=None):
+        """
+        Returns information about whether a particular index template exists.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-templates.html>`_
+
+        :arg name: The name of the template
+        :arg flat_settings: Return settings in flat format (default:
+            false)
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return self.transport.perform_request(
+            "HEAD", _make_path("_index_template", name), params=params, headers=headers
+        )
+
+    @query_params("cause", "create", "master_timeout")
+    def simulate_index_template(self, name, body=None, params=None, headers=None):
+        """
+        Simulates matching the given index name against the index templates in the
+        system.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-templates.html>`_
+
+        :arg name: The name of the index (it must be a concrete index
+            name)
+        :arg body: New index template definition, which will be included
+            in the simulation, as if it already exists in the system
+        :arg cause: User defined reason for dry-run creating the new
+            template for simulation purposes
+        :arg create: Whether the index template optionally defined in the
+            body should only be dry-run added if it is new, or may also replace
+            an existing one
+        :arg master_timeout: Specify timeout for connection to master
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_index_template", "_simulate_index", name),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable")
+    def flush_synced(self, index=None, params=None, headers=None):
+        """
+        Performs a synced flush operation on one or more indices. Synced flush is
+        deprecated and will be removed in 8.0. Use flush instead.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/indices-synced-flush-api.html>`_
+
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string for all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, none, all  Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        """
+        return self.transport.perform_request(
+            "POST",
+            _make_path(index, "_flush", "synced"),
+            params=params,
+            headers=headers,
+        )
diff --git a/elasticsearch_7/client/ingest.py b/elasticsearch_7/client/ingest.py
new file mode 100644
index 0000000000000000000000000000000000000000..d2f1bcbc381b9d58d276e54d18eed8aefb6d35a5
--- /dev/null
+++ b/elasticsearch_7/client/ingest.py
@@ -0,0 +1,99 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class IngestClient(NamespacedClient):
+    @query_params("master_timeout")
+    def get_pipeline(self, id=None, params=None, headers=None):
+        """
+        Returns a pipeline.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/get-pipeline-api.html>`_
+
+        :arg id: Comma-separated list of pipeline ids. Wildcards
+            supported
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        """
+        return self.transport.perform_request(
+            "GET", _make_path("_ingest", "pipeline", id), params=params, headers=headers
+        )
+
+    @query_params("master_timeout", "timeout")
+    def put_pipeline(self, id, body, params=None, headers=None):
+        """
+        Creates or updates a pipeline.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/put-pipeline-api.html>`_
+
+        :arg id: Pipeline ID
+        :arg body: The ingest definition
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg timeout: Explicit operation timeout
+        """
+        for param in (id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_ingest", "pipeline", id),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("master_timeout", "timeout")
+    def delete_pipeline(self, id, params=None, headers=None):
+        """
+        Deletes a pipeline.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/delete-pipeline-api.html>`_
+
+        :arg id: Pipeline ID
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg timeout: Explicit operation timeout
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'id'.")
+
+        return self.transport.perform_request(
+            "DELETE",
+            _make_path("_ingest", "pipeline", id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("verbose")
+    def simulate(self, body, id=None, params=None, headers=None):
+        """
+        Allows you to simulate a pipeline with example documents.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/simulate-pipeline-api.html>`_
+
+        :arg body: The simulate definition
+        :arg id: Pipeline ID
+        :arg verbose: Verbose mode. Display data output for each
+            processor in the executed pipeline
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_ingest", "pipeline", id, "_simulate"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
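
Putting the two calls together, a hypothetical pipeline can be registered and then dry-run against a sample document (the pipeline id and field names are illustrative):

from elasticsearch_7 import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster
es.ingest.put_pipeline(
    id="stamp-received-at",
    body={
        "description": "stamp documents on ingest",
        "processors": [
            {"set": {"field": "received_at", "value": "{{_ingest.timestamp}}"}}
        ],
    },
)
es.ingest.simulate(
    id="stamp-received-at",
    body={"docs": [{"_source": {"message": "hello"}}]},
    verbose=True,
)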
+
+    @query_params()
+    def processor_grok(self, params=None, headers=None):
+        """
+        Returns a list of the built-in patterns.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/grok-processor.html#grok-processor-rest-get>`_
+        """
+        return self.transport.perform_request(
+            "GET", "/_ingest/processor/grok", params=params, headers=headers
+        )
diff --git a/elasticsearch_7/client/license.py b/elasticsearch_7/client/license.py
new file mode 100644
index 0000000000000000000000000000000000000000..a7ad929fc0838489b2668e71dfca528a335db984
--- /dev/null
+++ b/elasticsearch_7/client/license.py
@@ -0,0 +1,98 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params
+
+
+class LicenseClient(NamespacedClient):
+    @query_params()
+    def delete(self, params=None, headers=None):
+        """
+        Deletes licensing information for the cluster.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/delete-license.html>`_
+        """
+        return self.transport.perform_request(
+            "DELETE", "/_license", params=params, headers=headers
+        )
+
+    @query_params("accept_enterprise", "local")
+    def get(self, params=None, headers=None):
+        """
+        Retrieves licensing information for the cluster.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/get-license.html>`_
+
+        :arg accept_enterprise: If the active license is an enterprise
+            license, return type as 'enterprise' (default: false)
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        """
+        return self.transport.perform_request(
+            "GET", "/_license", params=params, headers=headers
+        )
+
+    @query_params()
+    def get_basic_status(self, params=None, headers=None):
+        """
+        Retrieves information about the status of the basic license.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/get-basic-status.html>`_
+        """
+        return self.transport.perform_request(
+            "GET", "/_license/basic_status", params=params, headers=headers
+        )
+
+    @query_params()
+    def get_trial_status(self, params=None, headers=None):
+        """
+        Retrieves information about the status of the trial license.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/get-trial-status.html>`_
+        """
+        return self.transport.perform_request(
+            "GET", "/_license/trial_status", params=params, headers=headers
+        )
+
+    @query_params("acknowledge")
+    def post(self, body=None, params=None, headers=None):
+        """
+        Updates the license for the cluster.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/update-license.html>`_
+
+        :arg body: licenses to be installed
+        :arg acknowledge: Whether the user has acknowledged the license
+            change (default: false)
+        """
+        return self.transport.perform_request(
+            "PUT", "/_license", params=params, headers=headers, body=body
+        )
+
+    @query_params("acknowledge")
+    def post_start_basic(self, params=None, headers=None):
+        """
+        Starts an indefinite basic license.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/start-basic.html>`_
+
+        :arg acknowledge: Whether the user has acknowledged the license
+            change (default: false)
+        """
+        return self.transport.perform_request(
+            "POST", "/_license/start_basic", params=params, headers=headers
+        )
+
+    @query_params("acknowledge", "doc_type")
+    def post_start_trial(self, params=None, headers=None):
+        """
+        Starts a limited-time trial license.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/start-trial.html>`_
+
+        :arg acknowledge: Whether the user has acknowledged the license
+            change (default: false)
+        :arg doc_type: The type of trial license to generate (default:
+            "trial")
+        """
+        # type is a reserved word so it cannot be used, use doc_type instead
+        if "doc_type" in params:
+            params["type"] = params.pop("doc_type")
+
+        return self.transport.perform_request(
+            "POST", "/_license/start_trial", params=params, headers=headers
+        )
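
As a sketch of how these calls compose (purely illustrative, and assuming no paid features are in use), one could check the current license type and fall back to a basic license:

from elasticsearch_7 import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster
current = es.license.get()
if current["license"]["type"] == "trial":
    es.license.post_start_basic(acknowledge=True)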
diff --git a/elasticsearch_7/client/migration.py b/elasticsearch_7/client/migration.py
new file mode 100644
index 0000000000000000000000000000000000000000..998e5edb8b04dea1dd879f3f34ae3453f226d98d
--- /dev/null
+++ b/elasticsearch_7/client/migration.py
@@ -0,0 +1,24 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path
+
+
+class MigrationClient(NamespacedClient):
+    @query_params()
+    def deprecations(self, index=None, params=None, headers=None):
+        """
+        Retrieves information about different cluster, node, and index level settings
+        that use deprecated features that will be removed or changed in the next major
+        version.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/migration-api-deprecation.html>`_
+
+        :arg index: Index pattern
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path(index, "_migration", "deprecations"),
+            params=params,
+            headers=headers,
+        )
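
A minimal sketch (the index pattern is illustrative) that pulls the deprecation report before planning a major-version upgrade:

from elasticsearch_7 import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster
report = es.migration.deprecations(index="logs-*")
for index_name, issues in report.get("index_settings", {}).items():
    for issue in issues:
        print(index_name, issue["level"], issue["message"])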
diff --git a/elasticsearch_7/client/ml.py b/elasticsearch_7/client/ml.py
new file mode 100644
index 0000000000000000000000000000000000000000..1871c6b47b38806a7bf4d5276a8bc724b879b386
--- /dev/null
+++ b/elasticsearch_7/client/ml.py
@@ -0,0 +1,1490 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH, _bulk_body
+
+
+class MlClient(NamespacedClient):
+    @query_params("allow_no_jobs", "force", "timeout")
+    def close_job(self, job_id, body=None, params=None, headers=None):
+        """
+        Closes one or more anomaly detection jobs. A job can be opened and closed
+        multiple times throughout its lifecycle.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-close-job.html>`_
+
+        :arg job_id: The name of the job to close
+        :arg body: The URL params optionally sent in the body
+        :arg allow_no_jobs: Whether to ignore if a wildcard expression
+            matches no jobs. (This includes `_all` string or when no jobs have been
+            specified)
+        :arg force: True if the job should be forcefully closed
+        :arg timeout: Controls the time to wait until a job has closed.
+            Defaults to 30 minutes
+        """
+        if job_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'job_id'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "anomaly_detectors", job_id, "_close"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
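
A usage sketch with a made-up job id; `timeout` and `force` are query parameters handled by the decorator:

from elasticsearch_7 import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster
# Ask the job to close gracefully, waiting up to ten minutes.
es.ml.close_job(job_id="my-anomaly-job", timeout="10m")
# force=True would instead close the job immediately without graceful finalization.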
+
+    @query_params()
+    def delete_calendar(self, calendar_id, params=None, headers=None):
+        """
+        Deletes a calendar.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-delete-calendar.html>`_
+
+        :arg calendar_id: The ID of the calendar to delete
+        """
+        if calendar_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for a required argument 'calendar_id'."
+            )
+
+        return self.transport.perform_request(
+            "DELETE",
+            _make_path("_ml", "calendars", calendar_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def delete_calendar_event(self, calendar_id, event_id, params=None, headers=None):
+        """
+        Deletes scheduled events from a calendar.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-delete-calendar-event.html>`_
+
+        :arg calendar_id: The ID of the calendar to modify
+        :arg event_id: The ID of the event to remove from the calendar
+        """
+        for param in (calendar_id, event_id):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "DELETE",
+            _make_path("_ml", "calendars", calendar_id, "events", event_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def delete_calendar_job(self, calendar_id, job_id, params=None, headers=None):
+        """
+        Deletes anomaly detection jobs from a calendar.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-delete-calendar-job.html>`_
+
+        :arg calendar_id: The ID of the calendar to modify
+        :arg job_id: The ID of the job to remove from the calendar
+        """
+        for param in (calendar_id, job_id):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "DELETE",
+            _make_path("_ml", "calendars", calendar_id, "jobs", job_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("force")
+    def delete_datafeed(self, datafeed_id, params=None, headers=None):
+        """
+        Deletes an existing datafeed.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-delete-datafeed.html>`_
+
+        :arg datafeed_id: The ID of the datafeed to delete
+        :arg force: True if the datafeed should be forcefully deleted
+        """
+        if datafeed_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for a required argument 'datafeed_id'."
+            )
+
+        return self.transport.perform_request(
+            "DELETE",
+            _make_path("_ml", "datafeeds", datafeed_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def delete_expired_data(self, body=None, params=None, headers=None):
+        """
+        Deletes expired and unused machine learning data.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-delete-expired-data.html>`_
+
+        :arg body: deleting expired data parameters
+        """
+        return self.transport.perform_request(
+            "DELETE",
+            "/_ml/_delete_expired_data",
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def delete_filter(self, filter_id, params=None, headers=None):
+        """
+        Deletes a filter.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-delete-filter.html>`_
+
+        :arg filter_id: The ID of the filter to delete
+        """
+        if filter_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'filter_id'.")
+
+        return self.transport.perform_request(
+            "DELETE",
+            _make_path("_ml", "filters", filter_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("allow_no_forecasts", "timeout")
+    def delete_forecast(self, job_id, forecast_id=None, params=None, headers=None):
+        """
+        Deletes forecasts from a machine learning job.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-delete-forecast.html>`_
+
+        :arg job_id: The ID of the job from which to delete forecasts
+        :arg forecast_id: The ID of the forecast to delete, can be comma
+            delimited list. Leaving blank implies `_all`
+        :arg allow_no_forecasts: Whether to ignore if `_all` matches no
+            forecasts
+        :arg timeout: Controls the time to wait until the forecast(s)
+            are deleted. Defaults to 30 seconds
+        """
+        if job_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'job_id'.")
+
+        return self.transport.perform_request(
+            "DELETE",
+            _make_path("_ml", "anomaly_detectors", job_id, "_forecast", forecast_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("force", "wait_for_completion")
+    def delete_job(self, job_id, params=None, headers=None):
+        """
+        Deletes an existing anomaly detection job.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-delete-job.html>`_
+
+        :arg job_id: The ID of the job to delete
+        :arg force: True if the job should be forcefully deleted
+        :arg wait_for_completion: Should this request wait until the
+            operation has completed before returning.  Default: True
+        """
+        if job_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'job_id'.")
+
+        return self.transport.perform_request(
+            "DELETE",
+            _make_path("_ml", "anomaly_detectors", job_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def delete_model_snapshot(self, job_id, snapshot_id, params=None, headers=None):
+        """
+        Deletes an existing model snapshot.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-delete-snapshot.html>`_
+
+        :arg job_id: The ID of the job to fetch
+        :arg snapshot_id: The ID of the snapshot to delete
+        """
+        for param in (job_id, snapshot_id):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "DELETE",
+            _make_path(
+                "_ml", "anomaly_detectors", job_id, "model_snapshots", snapshot_id
+            ),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "charset",
+        "column_names",
+        "delimiter",
+        "explain",
+        "format",
+        "grok_pattern",
+        "has_header_row",
+        "line_merge_size_limit",
+        "lines_to_sample",
+        "quote",
+        "should_trim_fields",
+        "timeout",
+        "timestamp_field",
+        "timestamp_format",
+    )
+    def find_file_structure(self, body, params=None, headers=None):
+        """
+        Finds the structure of a text file. The text file must contain data that is
+        suitable to be ingested into Elasticsearch.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-find-file-structure.html>`_
+
+        :arg body: The contents of the file to be analyzed
+        :arg charset: Optional parameter to specify the character set of
+            the file
+        :arg column_names: Optional parameter containing a comma
+            separated list of the column names for a delimited file
+        :arg delimiter: Optional parameter to specify the delimiter
+            character for a delimited file - must be a single character
+        :arg explain: Whether to include a commentary on how the
+            structure was derived
+        :arg format: Optional parameter to specify the high level file
+            format  Valid choices: ndjson, xml, delimited, semi_structured_text
+        :arg grok_pattern: Optional parameter to specify the Grok
+            pattern that should be used to extract fields from messages in a semi-
+            structured text file
+        :arg has_header_row: Optional parameter to specify whether a
+            delimited file includes the column names in its first row
+        :arg line_merge_size_limit: Maximum number of characters
+            permitted in a single message when lines are merged to create messages.
+            Default: 10000
+        :arg lines_to_sample: How many lines of the file should be
+            included in the analysis  Default: 1000
+        :arg quote: Optional parameter to specify the quote character
+            for a delimited file - must be a single character
+        :arg should_trim_fields: Optional parameter to specify whether
+            the values between delimiters in a delimited file should have whitespace
+            trimmed from them
+        :arg timeout: Timeout after which the analysis will be aborted
+            Default: 25s
+        :arg timestamp_field: Optional parameter to specify the
+            timestamp field in the file
+        :arg timestamp_format: Optional parameter to specify the
+            timestamp format in the file - may be either a Joda or Java time format
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        body = _bulk_body(self.transport.serializer, body)
+        return self.transport.perform_request(
+            "POST",
+            "/_ml/find_file_structure",
+            params=params,
+            headers=headers,
+            body=body,
+        )
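
A sketch of `find_file_structure`, assuming a few sample records passed as a list of
dicts (`_bulk_body` serializes the list to newline-delimited JSON); the field names
are placeholders:

    from elasticsearch_7 import Elasticsearch

    es = Elasticsearch(["http://localhost:9200"])

    sample = [
        {"timestamp": "2020-06-01T00:00:00Z", "responsetime": 12.3},
        {"timestamp": "2020-06-01T00:01:00Z", "responsetime": 15.8},
    ]
    # Ask Elasticsearch to infer the structure, sampling at most 500 lines.
    structure = es.ml.find_file_structure(body=sample, lines_to_sample=500, explain=True)
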
+
+    @query_params("advance_time", "calc_interim", "end", "skip_time", "start")
+    def flush_job(self, job_id, body=None, params=None, headers=None):
+        """
+        Forces any buffered data to be processed by the job.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-flush-job.html>`_
+
+        :arg job_id: The name of the job to flush
+        :arg body: Flush parameters
+        :arg advance_time: Advances time to the given value generating
+            results and updating the model for the advanced interval
+        :arg calc_interim: Calculates interim results for the most
+            recent bucket or all buckets within the latency period
+        :arg end: When used in conjunction with calc_interim, specifies
+            the range of buckets on which to calculate interim results
+        :arg skip_time: Skips time to the given value without generating
+            results or updating the model for the skipped interval
+        :arg start: When used in conjunction with calc_interim,
+            specifies the range of buckets on which to calculate interim results
+        """
+        if job_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'job_id'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "anomaly_detectors", job_id, "_flush"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("duration", "expires_in")
+    def forecast(self, job_id, params=None, headers=None):
+        """
+        Predicts the future behavior of a time series by using its historical behavior.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-forecast.html>`_
+
+        :arg job_id: The ID of the job to forecast for
+        :arg duration: The duration of the forecast
+        :arg expires_in: The time interval after which the forecast
+            expires. Expired forecasts will be deleted at the first opportunity.
+        """
+        if job_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'job_id'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "anomaly_detectors", job_id, "_forecast"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "anomaly_score",
+        "desc",
+        "end",
+        "exclude_interim",
+        "expand",
+        "from_",
+        "size",
+        "sort",
+        "start",
+    )
+    def get_buckets(self, job_id, body=None, timestamp=None, params=None, headers=None):
+        """
+        Retrieves anomaly detection job results for one or more buckets.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-get-bucket.html>`_
+
+        :arg job_id: ID of the job to get bucket results from
+        :arg body: Bucket selection details if not provided in URI
+        :arg timestamp: The timestamp of the desired single bucket
+            result
+        :arg anomaly_score: Filter for the most anomalous buckets
+        :arg desc: Set the sort direction
+        :arg end: End time filter for buckets
+        :arg exclude_interim: Exclude interim results
+        :arg expand: Include anomaly records
+        :arg from_: skips a number of buckets
+        :arg size: specifies a max number of buckets to get
+        :arg sort: Sort buckets by a particular field
+        :arg start: Start time filter for buckets
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        if job_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'job_id'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path(
+                "_ml", "anomaly_detectors", job_id, "results", "buckets", timestamp
+            ),
+            params=params,
+            headers=headers,
+            body=body,
+        )
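
A sketch of paging through bucket results for a hypothetical job; note that the
method itself maps `from_` onto the reserved-word query parameter `from`:

    from elasticsearch_7 import Elasticsearch

    es = Elasticsearch(["http://localhost:9200"])

    buckets = es.ml.get_buckets(
        "my-job",          # hypothetical job ID
        from_=0,           # translated to the `from` query parameter
        size=10,
        exclude_interim=True,
        sort="anomaly_score",
        desc=True,
    )
    for bucket in buckets["buckets"]:
        print(bucket["timestamp"], bucket["anomaly_score"])
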
+
+    @query_params("end", "from_", "job_id", "size", "start")
+    def get_calendar_events(self, calendar_id, params=None, headers=None):
+        """
+        Retrieves information about the scheduled events in calendars.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-get-calendar-event.html>`_
+
+        :arg calendar_id: The ID of the calendar containing the events
+        :arg end: Get events before this time
+        :arg from_: Skips a number of events
+        :arg job_id: Get events for the job. When this option is used
+            calendar_id must be '_all'
+        :arg size: Specifies a max number of events to get
+        :arg start: Get events after this time
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        if calendar_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for a required argument 'calendar_id'."
+            )
+
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_ml", "calendars", calendar_id, "events"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("from_", "size")
+    def get_calendars(self, body=None, calendar_id=None, params=None, headers=None):
+        """
+        Retrieves configuration information for calendars.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-get-calendar.html>`_
+
+        :arg body: The from and size parameters optionally sent in the
+            body
+        :arg calendar_id: The ID of the calendar to fetch
+        :arg from_: skips a number of calendars
+        :arg size: specifies a max number of calendars to get
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "calendars", calendar_id),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("allow_no_datafeeds")
+    def get_datafeed_stats(self, datafeed_id=None, params=None, headers=None):
+        """
+        Retrieves usage information for datafeeds.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-get-datafeed-stats.html>`_
+
+        :arg datafeed_id: The ID of the datafeeds stats to fetch
+        :arg allow_no_datafeeds: Whether to ignore if a wildcard
+            expression matches no datafeeds. (This includes `_all` string or when no
+            datafeeds have been specified)
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_ml", "datafeeds", datafeed_id, "_stats"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("allow_no_datafeeds")
+    def get_datafeeds(self, datafeed_id=None, params=None, headers=None):
+        """
+        Retrieves configuration information for datafeeds.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-get-datafeed.html>`_
+
+        :arg datafeed_id: The ID of the datafeeds to fetch
+        :arg allow_no_datafeeds: Whether to ignore if a wildcard
+            expression matches no datafeeds. (This includes `_all` string or when no
+            datafeeds have been specified)
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_ml", "datafeeds", datafeed_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("from_", "size")
+    def get_filters(self, filter_id=None, params=None, headers=None):
+        """
+        Retrieves filters.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-get-filter.html>`_
+
+        :arg filter_id: The ID of the filter to fetch
+        :arg from_: skips a number of filters
+        :arg size: specifies a max number of filters to get
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_ml", "filters", filter_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "desc",
+        "end",
+        "exclude_interim",
+        "from_",
+        "influencer_score",
+        "size",
+        "sort",
+        "start",
+    )
+    def get_influencers(self, job_id, body=None, params=None, headers=None):
+        """
+        Retrieves anomaly detection job results for one or more influencers.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-get-influencer.html>`_
+
+        :arg job_id: Identifier for the anomaly detection job
+        :arg body: Influencer selection criteria
+        :arg desc: whether the results should be sorted in descending
+            order
+        :arg end: end timestamp for the requested influencers
+        :arg exclude_interim: Exclude interim results
+        :arg from_: skips a number of influencers
+        :arg influencer_score: influencer score threshold for the
+            requested influencers
+        :arg size: specifies a max number of influencers to get
+        :arg sort: sort field for the requested influencers
+        :arg start: start timestamp for the requested influencers
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        if job_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'job_id'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "anomaly_detectors", job_id, "results", "influencers"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("allow_no_jobs")
+    def get_job_stats(self, job_id=None, params=None, headers=None):
+        """
+        Retrieves usage information for anomaly detection jobs.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-get-job-stats.html>`_
+
+        :arg job_id: The ID of the jobs stats to fetch
+        :arg allow_no_jobs: Whether to ignore if a wildcard expression
+            matches no jobs. (This includes `_all` string or when no jobs have been
+            specified)
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_ml", "anomaly_detectors", job_id, "_stats"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("allow_no_jobs")
+    def get_jobs(self, job_id=None, params=None, headers=None):
+        """
+        Retrieves configuration information for anomaly detection jobs.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-get-job.html>`_
+
+        :arg job_id: The ID of the jobs to fetch
+        :arg allow_no_jobs: Whether to ignore if a wildcard expression
+            matches no jobs. (This includes `_all` string or when no jobs have been
+            specified)
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_ml", "anomaly_detectors", job_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "allow_no_jobs",
+        "bucket_span",
+        "end",
+        "exclude_interim",
+        "overall_score",
+        "start",
+        "top_n",
+    )
+    def get_overall_buckets(self, job_id, body=None, params=None, headers=None):
+        """
+        Retrieves overall bucket results that summarize the bucket results of multiple
+        anomaly detection jobs.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-get-overall-buckets.html>`_
+
+        :arg job_id: The job IDs for which to calculate overall bucket
+            results
+        :arg body: Overall bucket selection details if not provided in
+            URI
+        :arg allow_no_jobs: Whether to ignore if a wildcard expression
+            matches no jobs. (This includes `_all` string or when no jobs have been
+            specified)
+        :arg bucket_span: The span of the overall buckets. Defaults to
+            the longest job bucket_span
+        :arg end: Returns overall buckets with timestamps earlier than
+            this time
+        :arg exclude_interim: If true overall buckets that include
+            interim buckets will be excluded
+        :arg overall_score: Returns overall buckets with overall scores
+            higher than this value
+        :arg start: Returns overall buckets with timestamps after this
+            time
+        :arg top_n: The number of top job bucket scores to be used in
+            the overall_score calculation
+        """
+        if job_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'job_id'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path(
+                "_ml", "anomaly_detectors", job_id, "results", "overall_buckets"
+            ),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params(
+        "desc",
+        "end",
+        "exclude_interim",
+        "from_",
+        "record_score",
+        "size",
+        "sort",
+        "start",
+    )
+    def get_records(self, job_id, body=None, params=None, headers=None):
+        """
+        Retrieves anomaly records for an anomaly detection job.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-get-record.html>`_
+
+        :arg job_id: The ID of the job
+        :arg body: Record selection criteria
+        :arg desc: Set the sort direction
+        :arg end: End time filter for records
+        :arg exclude_interim: Exclude interim results
+        :arg from_: skips a number of records
+        :arg record_score: Returns records with anomaly scores greater
+            or equal than this value
+        :arg size: specifies a max number of records to get
+        :arg sort: Sort records by a particular field
+        :arg start: Start time filter for records
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        if job_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'job_id'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "anomaly_detectors", job_id, "results", "records"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def info(self, params=None, headers=None):
+        """
+        Returns defaults and limits used by machine learning.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/get-ml-info.html>`_
+        """
+        return self.transport.perform_request(
+            "GET", "/_ml/info", params=params, headers=headers
+        )
+
+    @query_params()
+    def open_job(self, job_id, params=None, headers=None):
+        """
+        Opens one or more anomaly detection jobs.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-open-job.html>`_
+
+        :arg job_id: The ID of the job to open
+        """
+        if job_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'job_id'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "anomaly_detectors", job_id, "_open"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def post_calendar_events(self, calendar_id, body, params=None, headers=None):
+        """
+        Posts scheduled events in a calendar.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-post-calendar-event.html>`_
+
+        :arg calendar_id: The ID of the calendar to modify
+        :arg body: A list of events
+        """
+        for param in (calendar_id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "calendars", calendar_id, "events"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("reset_end", "reset_start")
+    def post_data(self, job_id, body, params=None, headers=None):
+        """
+        Sends data to an anomaly detection job for analysis.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-post-data.html>`_
+
+        :arg job_id: The name of the job receiving the data
+        :arg body: The data to process
+        :arg reset_end: Optional parameter to specify the end of the
+            bucket resetting range
+        :arg reset_start: Optional parameter to specify the start of the
+            bucket resetting range
+        """
+        for param in (job_id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        body = _bulk_body(self.transport.serializer, body)
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "anomaly_detectors", job_id, "_data"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
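
A sketch of streaming records into an open job with `post_data`; a list of dicts is
accepted because `_bulk_body` turns it into newline-delimited JSON (the job ID and
field names are placeholders):

    from elasticsearch_7 import Elasticsearch

    es = Elasticsearch(["http://localhost:9200"])

    records = [
        {"timestamp": 1591056000000, "responsetime": 12.3},
        {"timestamp": 1591056060000, "responsetime": 98.7},
    ]
    counts = es.ml.post_data(job_id="my-job", body=records)
    print(counts["processed_record_count"])
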
+
+    @query_params()
+    def preview_datafeed(self, datafeed_id, params=None, headers=None):
+        """
+        Previews a datafeed.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-preview-datafeed.html>`_
+
+        :arg datafeed_id: The ID of the datafeed to preview
+        """
+        if datafeed_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for a required argument 'datafeed_id'."
+            )
+
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_ml", "datafeeds", datafeed_id, "_preview"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def put_calendar(self, calendar_id, body=None, params=None, headers=None):
+        """
+        Instantiates a calendar.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-put-calendar.html>`_
+
+        :arg calendar_id: The ID of the calendar to create
+        :arg body: The calendar details
+        """
+        if calendar_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for a required argument 'calendar_id'."
+            )
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_ml", "calendars", calendar_id),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def put_calendar_job(self, calendar_id, job_id, params=None, headers=None):
+        """
+        Adds an anomaly detection job to a calendar.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-put-calendar-job.html>`_
+
+        :arg calendar_id: The ID of the calendar to modify
+        :arg job_id: The ID of the job to add to the calendar
+        """
+        for param in (calendar_id, job_id):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_ml", "calendars", calendar_id, "jobs", job_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "allow_no_indices", "expand_wildcards", "ignore_throttled", "ignore_unavailable"
+    )
+    def put_datafeed(self, datafeed_id, body, params=None, headers=None):
+        """
+        Instantiates a datafeed.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-put-datafeed.html>`_
+
+        :arg datafeed_id: The ID of the datafeed to create
+        :arg body: The datafeed config
+        :arg allow_no_indices: Ignore if the source index expressions
+            resolve to no concrete indices (default: true)
+        :arg expand_wildcards: Whether source index expressions should
+            get expanded to open or closed indices (default: open)  Valid choices:
+            open, closed, hidden, none, all
+        :arg ignore_throttled: Ignore indices that are marked as
+            throttled (default: true)
+        :arg ignore_unavailable: Ignore unavailable indexes (default:
+            false)
+        """
+        for param in (datafeed_id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_ml", "datafeeds", datafeed_id),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def put_filter(self, filter_id, body, params=None, headers=None):
+        """
+        Instantiates a filter.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-put-filter.html>`_
+
+        :arg filter_id: The ID of the filter to create
+        :arg body: The filter details
+        """
+        for param in (filter_id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_ml", "filters", filter_id),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def put_job(self, job_id, body, params=None, headers=None):
+        """
+        Instantiates an anomaly detection job.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-put-job.html>`_
+
+        :arg job_id: The ID of the job to create
+        :arg body: The job
+        """
+        for param in (job_id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_ml", "anomaly_detectors", job_id),
+            params=params,
+            headers=headers,
+            body=body,
+        )
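
A sketch of creating and opening an anomaly detection job; the job ID and the
`responsetime`/`timestamp` fields are placeholders:

    from elasticsearch_7 import Elasticsearch

    es = Elasticsearch(["http://localhost:9200"])

    job_config = {
        "analysis_config": {
            "bucket_span": "15m",
            "detectors": [{"function": "mean", "field_name": "responsetime"}],
        },
        "data_description": {"time_field": "timestamp"},
    }
    es.ml.put_job(job_id="my-job", body=job_config)
    es.ml.open_job("my-job")
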
+
+    @query_params("enabled", "timeout")
+    def set_upgrade_mode(self, params=None, headers=None):
+        """
+        Sets a cluster wide upgrade_mode setting that prepares machine learning indices
+        for an upgrade.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-set-upgrade-mode.html>`_
+
+        :arg enabled: Whether to enable upgrade_mode ML setting or not.
+            Defaults to false.
+        :arg timeout: Controls the time to wait before action times out.
+            Defaults to 30 seconds
+        """
+        return self.transport.perform_request(
+            "POST", "/_ml/set_upgrade_mode", params=params, headers=headers
+        )
+
+    @query_params("end", "start", "timeout")
+    def start_datafeed(self, datafeed_id, body=None, params=None, headers=None):
+        """
+        Starts one or more datafeeds.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-start-datafeed.html>`_
+
+        :arg datafeed_id: The ID of the datafeed to start
+        :arg body: The start datafeed parameters
+        :arg end: The end time when the datafeed should stop. When not
+            set, the datafeed continues in real time
+        :arg start: The start time from where the datafeed should begin
+        :arg timeout: Controls the time to wait until a datafeed has
+            started. Defaults to 20 seconds
+        """
+        if datafeed_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for a required argument 'datafeed_id'."
+            )
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "datafeeds", datafeed_id, "_start"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
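
Continuing the job sketch above, a datafeed can be created for the job and started
from a hypothetical source index:

    from elasticsearch_7 import Elasticsearch

    es = Elasticsearch(["http://localhost:9200"])

    datafeed_config = {
        "job_id": "my-job",               # hypothetical job ID
        "indices": ["server-metrics-*"],  # hypothetical source index pattern
        "query": {"match_all": {}},
    }
    es.ml.put_datafeed(datafeed_id="datafeed-my-job", body=datafeed_config)
    es.ml.start_datafeed("datafeed-my-job", start="now-7d")
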
+
+    @query_params("allow_no_datafeeds", "force", "timeout")
+    def stop_datafeed(self, datafeed_id, params=None, headers=None):
+        """
+        Stops one or more datafeeds.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-stop-datafeed.html>`_
+
+        :arg datafeed_id: The ID of the datafeed to stop
+        :arg allow_no_datafeeds: Whether to ignore if a wildcard
+            expression matches no datafeeds. (This includes `_all` string or when no
+            datafeeds have been specified)
+        :arg force: True if the datafeed should be forcefully stopped.
+        :arg timeout: Controls the time to wait until a datafeed has
+            stopped. Defaults to 20 seconds
+        """
+        if datafeed_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for a required argument 'datafeed_id'."
+            )
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "datafeeds", datafeed_id, "_stop"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "allow_no_indices", "expand_wildcards", "ignore_throttled", "ignore_unavailable"
+    )
+    def update_datafeed(self, datafeed_id, body, params=None, headers=None):
+        """
+        Updates certain properties of a datafeed.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-update-datafeed.html>`_
+
+        :arg datafeed_id: The ID of the datafeed to update
+        :arg body: The datafeed update settings
+        :arg allow_no_indices: Ignore if the source index expressions
+            resolve to no concrete indices (default: true)
+        :arg expand_wildcards: Whether source index expressions should
+            get expanded to open or closed indices (default: open)  Valid choices:
+            open, closed, hidden, none, all
+        :arg ignore_throttled: Ignore indices that are marked as
+            throttled (default: true)
+        :arg ignore_unavailable: Ignore unavailable indexes (default:
+            false)
+        """
+        for param in (datafeed_id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "datafeeds", datafeed_id, "_update"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def update_filter(self, filter_id, body, params=None, headers=None):
+        """
+        Updates the description of a filter, adds items, or removes items.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-update-filter.html>`_
+
+        :arg filter_id: The ID of the filter to update
+        :arg body: The filter update
+        """
+        for param in (filter_id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "filters", filter_id, "_update"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def update_job(self, job_id, body, params=None, headers=None):
+        """
+        Updates certain properties of an anomaly detection job.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-update-job.html>`_
+
+        :arg job_id: The ID of the job to update
+        :arg body: The job update settings
+        """
+        for param in (job_id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "anomaly_detectors", job_id, "_update"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def validate(self, body, params=None, headers=None):
+        """
+        Validates an anomaly detection job.
+        `<https://www.elastic.co/guide/en/machine-learning/current/ml-jobs.html>`_
+
+        :arg body: The job config
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return self.transport.perform_request(
+            "POST",
+            "/_ml/anomaly_detectors/_validate",
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def validate_detector(self, body, params=None, headers=None):
+        """
+        Validates an anomaly detection detector.
+        `<https://www.elastic.co/guide/en/machine-learning/current/ml-jobs.html>`_
+
+        :arg body: The detector
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return self.transport.perform_request(
+            "POST",
+            "/_ml/anomaly_detectors/_validate/detector",
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("force", "timeout")
+    def delete_data_frame_analytics(self, id, params=None, headers=None):
+        """
+        Deletes an existing data frame analytics job.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/delete-dfanalytics.html>`_
+
+        :arg id: The ID of the data frame analytics to delete
+        :arg force: True if the job should be forcefully deleted
+        :arg timeout: Controls the time to wait until a job is deleted.
+            Defaults to 1 minute
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'id'.")
+
+        return self.transport.perform_request(
+            "DELETE",
+            _make_path("_ml", "data_frame", "analytics", id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def evaluate_data_frame(self, body, params=None, headers=None):
+        """
+        Evaluates the data frame analytics for an annotated index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/evaluate-dfanalytics.html>`_
+
+        :arg body: The evaluation definition
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return self.transport.perform_request(
+            "POST",
+            "/_ml/data_frame/_evaluate",
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("allow_no_match", "from_", "size")
+    def get_data_frame_analytics(self, id=None, params=None, headers=None):
+        """
+        Retrieves configuration information for data frame analytics jobs.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/get-dfanalytics.html>`_
+
+        :arg id: The ID of the data frame analytics to fetch
+        :arg allow_no_match: Whether to ignore if a wildcard expression
+            matches no data frame analytics. (This includes `_all` string or when no
+            data frame analytics have been specified)  Default: True
+        :arg from_: skips a number of analytics
+        :arg size: specifies a max number of analytics to get  Default:
+            100
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_ml", "data_frame", "analytics", id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("allow_no_match", "from_", "size")
+    def get_data_frame_analytics_stats(self, id=None, params=None, headers=None):
+        """
+        Retrieves usage information for data frame analytics jobs.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/get-dfanalytics-stats.html>`_
+
+        :arg id: The ID of the data frame analytics stats to fetch
+        :arg allow_no_match: Whether to ignore if a wildcard expression
+            matches no data frame analytics. (This includes `_all` string or when no
+            data frame analytics have been specified)  Default: True
+        :arg from_: skips a number of analytics
+        :arg size: specifies a max number of analytics to get  Default:
+            100
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_ml", "data_frame", "analytics", id, "_stats"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def put_data_frame_analytics(self, id, body, params=None, headers=None):
+        """
+        Instantiates a data frame analytics job.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/put-dfanalytics.html>`_
+
+        :arg id: The ID of the data frame analytics to create
+        :arg body: The data frame analytics configuration
+        """
+        for param in (id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_ml", "data_frame", "analytics", id),
+            params=params,
+            headers=headers,
+            body=body,
+        )
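
A sketch of creating and starting a data frame analytics job with a default outlier
detection analysis; the source and destination index names are placeholders:

    from elasticsearch_7 import Elasticsearch

    es = Elasticsearch(["http://localhost:9200"])

    dfa_config = {
        "source": {"index": "my-source-index"},
        "dest": {"index": "my-outliers"},
        "analysis": {"outlier_detection": {}},
    }
    es.ml.put_data_frame_analytics(id="my-dfa", body=dfa_config)
    es.ml.start_data_frame_analytics("my-dfa", timeout="1m")
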
+
+    @query_params("timeout")
+    def start_data_frame_analytics(self, id, body=None, params=None, headers=None):
+        """
+        Starts a data frame analytics job.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/start-dfanalytics.html>`_
+
+        :arg id: The ID of the data frame analytics to start
+        :arg body: The start data frame analytics parameters
+        :arg timeout: Controls the time to wait until the task has
+            started. Defaults to 20 seconds
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'id'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "data_frame", "analytics", id, "_start"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("allow_no_match", "force", "timeout")
+    def stop_data_frame_analytics(self, id, body=None, params=None, headers=None):
+        """
+        Stops one or more data frame analytics jobs.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/stop-dfanalytics.html>`_
+
+        :arg id: The ID of the data frame analytics to stop
+        :arg body: The stop data frame analytics parameters
+        :arg allow_no_match: Whether to ignore if a wildcard expression
+            matches no data frame analytics. (This includes `_all` string or when no
+            data frame analytics have been specified)
+        :arg force: True if the data frame analytics should be
+            forcefully stopped
+        :arg timeout: Controls the time to wait until the task has
+            stopped. Defaults to 20 seconds
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'id'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "data_frame", "analytics", id, "_stop"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def delete_trained_model(self, model_id, params=None, headers=None):
+        """
+        Deletes an existing trained inference model that is currently not referenced by
+        an ingest pipeline.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/delete-inference.html>`_
+
+        :arg model_id: The ID of the trained model to delete
+        """
+        if model_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'model_id'.")
+
+        return self.transport.perform_request(
+            "DELETE",
+            _make_path("_ml", "inference", model_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "allow_no_match",
+        "decompress_definition",
+        "from_",
+        "include_model_definition",
+        "size",
+        "tags",
+    )
+    def get_trained_models(self, model_id=None, params=None, headers=None):
+        """
+        Retrieves configuration information for a trained inference model.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/get-inference.html>`_
+
+        :arg model_id: The ID of the trained models to fetch
+        :arg allow_no_match: Whether to ignore if a wildcard expression
+            matches no trained models. (This includes `_all` string or when no
+            trained models have been specified)  Default: True
+        :arg decompress_definition: Should the model definition be
+            decompressed into valid JSON or returned in a custom compressed format.
+            Defaults to true.  Default: True
+        :arg from_: skips a number of trained models
+        :arg include_model_definition: Should the full model definition
+            be included in the results. These definitions can be large, so be
+            cautious when including them. Defaults to false.
+        :arg size: specifies a max number of trained models to get
+            Default: 100
+        :arg tags: A comma-separated list of tags that the model must
+            have.
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_ml", "inference", model_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("allow_no_match", "from_", "size")
+    def get_trained_models_stats(self, model_id=None, params=None, headers=None):
+        """
+        Retrieves usage information for trained inference models.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/get-inference-stats.html>`_
+
+        :arg model_id: The ID of the trained models stats to fetch
+        :arg allow_no_match: Whether to ignore if a wildcard expression
+            matches no trained models. (This includes `_all` string or when no
+            trained models have been specified)  Default: True
+        :arg from_: skips a number of trained models
+        :arg size: specifies a max number of trained models to get
+            Default: 100
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_ml", "inference", model_id, "_stats"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def put_trained_model(self, model_id, body, params=None, headers=None):
+        """
+        Creates an inference trained model.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/put-inference.html>`_
+
+        :arg model_id: The ID of the trained models to store
+        :arg body: The trained model configuration
+        """
+        for param in (model_id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_ml", "inference", model_id),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def estimate_model_memory(self, body, params=None, headers=None):
+        """
+        Estimates the model memory an anomaly detection job is likely to need.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-apis.html>`_
+
+        :arg body: The analysis config, plus cardinality estimates for
+            fields it references
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return self.transport.perform_request(
+            "POST",
+            "/_ml/anomaly_detectors/_estimate_model_memory",
+            params=params,
+            headers=headers,
+            body=body,
+        )
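
A sketch of estimating model memory for an analysis config before creating a job; the
caller supplies its own cardinality estimate for the `by` field (all values here are
placeholders):

    from elasticsearch_7 import Elasticsearch

    es = Elasticsearch(["http://localhost:9200"])

    estimate = es.ml.estimate_model_memory(
        body={
            "analysis_config": {
                "bucket_span": "15m",
                "detectors": [
                    {
                        "function": "mean",
                        "field_name": "responsetime",
                        "by_field_name": "airline",
                    }
                ],
            },
            "overall_cardinality": {"airline": 50},
        }
    )
    print(estimate["model_memory_estimate"])
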
+
+    @query_params()
+    def explain_data_frame_analytics(
+        self, body=None, id=None, params=None, headers=None
+    ):
+        """
+        Explains a data frame analytics config.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/explain-dfanalytics.html>`_
+
+        :arg body: The data frame analytics config to explain
+        :arg id: The ID of the data frame analytics to explain
+        """
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "data_frame", "analytics", id, "_explain"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("from_", "size")
+    def get_categories(
+        self, job_id, body=None, category_id=None, params=None, headers=None
+    ):
+        """
+        Retrieves anomaly detection job results for one or more categories.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-get-category.html>`_
+
+        :arg job_id: The name of the job
+        :arg body: Category selection details if not provided in URI
+        :arg category_id: The identifier of the category definition of
+            interest
+        :arg from_: skips a number of categories
+        :arg size: specifies a max number of categories to get
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        if job_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'job_id'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path(
+                "_ml", "anomaly_detectors", job_id, "results", "categories", category_id
+            ),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("desc", "end", "from_", "size", "sort", "start")
+    def get_model_snapshots(
+        self, job_id, body=None, snapshot_id=None, params=None, headers=None
+    ):
+        """
+        Retrieves information about model snapshots.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-get-snapshot.html>`_
+
+        :arg job_id: The ID of the job to fetch
+        :arg body: Model snapshot selection criteria
+        :arg snapshot_id: The ID of the snapshot to fetch
+        :arg desc: True if the results should be sorted in descending
+            order
+        :arg end: The filter 'end' query parameter
+        :arg from_: Skips a number of documents
+        :arg size: Specifies a maximum number of snapshots to get
+        :arg sort: Name of the field to sort on
+        :arg start: The filter 'start' query parameter
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        if job_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'job_id'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path(
+                "_ml", "anomaly_detectors", job_id, "model_snapshots", snapshot_id
+            ),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("delete_intervening_results")
+    def revert_model_snapshot(
+        self, job_id, snapshot_id, body=None, params=None, headers=None
+    ):
+        """
+        Reverts to a specific snapshot.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-revert-snapshot.html>`_
+
+        :arg job_id: The ID of the job to fetch
+        :arg snapshot_id: The ID of the snapshot to revert to
+        :arg body: Reversion options
+        :arg delete_intervening_results: Should we reset the results
+            back to the time of the snapshot?
+        """
+        for param in (job_id, snapshot_id):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path(
+                "_ml",
+                "anomaly_detectors",
+                job_id,
+                "model_snapshots",
+                snapshot_id,
+                "_revert",
+            ),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def update_model_snapshot(
+        self, job_id, snapshot_id, body, params=None, headers=None
+    ):
+        """
+        Updates certain properties of a snapshot.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/ml-update-snapshot.html>`_
+
+        :arg job_id: The ID of the job to fetch
+        :arg snapshot_id: The ID of the snapshot to update
+        :arg body: The model snapshot properties to update
+        """
+        for param in (job_id, snapshot_id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path(
+                "_ml",
+                "anomaly_detectors",
+                job_id,
+                "model_snapshots",
+                snapshot_id,
+                "_update",
+            ),
+            params=params,
+            headers=headers,
+            body=body,
+        )
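
A sketch tying the snapshot helpers above together: close a hypothetical job, look up
its most recent model snapshot, and revert to it while discarding intervening results:

    from elasticsearch_7 import Elasticsearch

    es = Elasticsearch(["http://localhost:9200"])

    es.ml.close_job("my-job")  # the job must be closed before reverting
    snapshots = es.ml.get_model_snapshots(
        job_id="my-job", sort="timestamp", desc=True, size=1
    )
    latest = snapshots["model_snapshots"][0]["snapshot_id"]
    es.ml.revert_model_snapshot(
        job_id="my-job", snapshot_id=latest, delete_intervening_results=True
    )
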
diff --git a/elasticsearch_7/client/monitoring.py b/elasticsearch_7/client/monitoring.py
new file mode 100644
index 0000000000000000000000000000000000000000..cec6f190b255a3a8f1a0c876f657d9f903d0cefc
--- /dev/null
+++ b/elasticsearch_7/client/monitoring.py
@@ -0,0 +1,34 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH, _bulk_body
+
+
+class MonitoringClient(NamespacedClient):
+    @query_params("interval", "system_api_version", "system_id")
+    def bulk(self, body, doc_type=None, params=None, headers=None):
+        """
+        Used by the monitoring features to send monitoring data.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/monitor-elasticsearch-cluster.html>`_
+
+        :arg body: The operation definition and data (action-data
+            pairs), separated by newlines
+        :arg doc_type: Default document type for items which don't
+            provide one
+        :arg interval: Collection interval (e.g., '10s' or '10000ms') of
+            the payload
+        :arg system_api_version: API Version of the monitored system
+        :arg system_id: Identifier of the monitored system
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        body = _bulk_body(self.transport.serializer, body)
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_monitoring", doc_type, "bulk"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
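The monitoring bulk endpoint is normally driven by the monitoring agents themselves, but here is a hedged sketch of exercising the wrapper directly; the system identifiers and the metric document are made up, and `_bulk_body` is assumed to accept either an NDJSON string or a sequence of dicts.

```python
from elasticsearch_7 import Elasticsearch

es = Elasticsearch("http://localhost:9200")

# Action/metadata line followed by its document, as in the regular bulk API.
payload = [
    {"index": {"_type": "node_stats"}},          # hypothetical action line
    {"node_id": "abc123", "cpu_percent": 42},    # hypothetical metric document
]

es.monitoring.bulk(
    body=payload,
    system_id="logstash",         # identifier of the monitored system
    system_api_version="7",
    interval="10s",               # collection interval of the payload
)
```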
diff --git a/elasticsearch_7/client/nodes.py b/elasticsearch_7/client/nodes.py
new file mode 100644
index 0000000000000000000000000000000000000000..d65f3b55ecc726b0530becca27da263378fd5780
--- /dev/null
+++ b/elasticsearch_7/client/nodes.py
@@ -0,0 +1,160 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path
+
+
+class NodesClient(NamespacedClient):
+    @query_params("timeout")
+    def reload_secure_settings(
+        self, body=None, node_id=None, params=None, headers=None
+    ):
+        """
+        Reloads secure settings.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/secure-settings.html#reloadable-secure-settings>`_
+
+        :arg body: An object containing the password for the
+            elasticsearch keystore
+        :arg node_id: A comma-separated list of node IDs to span the
+            reload/reinit call. Should stay empty because reloading usually involves
+            all cluster nodes.
+        :arg timeout: Explicit operation timeout
+        """
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_nodes", node_id, "reload_secure_settings"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("flat_settings", "timeout")
+    def info(self, node_id=None, metric=None, params=None, headers=None):
+        """
+        Returns information about nodes in the cluster.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cluster-nodes-info.html>`_
+
+        :arg node_id: A comma-separated list of node IDs or names to
+            limit the returned information; use `_local` to return information from
+            the node you're connecting to, leave empty to get information from all
+            nodes
+        :arg metric: A comma-separated list of metrics you wish
+            returned. Leave empty to return all.  Valid choices: settings, os,
+            process, jvm, thread_pool, transport, http, plugins, ingest
+        :arg flat_settings: Return settings in flat format (default:
+            false)
+        :arg timeout: Explicit operation timeout
+        """
+        return self.transport.perform_request(
+            "GET", _make_path("_nodes", node_id, metric), params=params, headers=headers
+        )
+
+    @query_params(
+        "doc_type", "ignore_idle_threads", "interval", "snapshots", "threads", "timeout"
+    )
+    def hot_threads(self, node_id=None, params=None, headers=None):
+        """
+        Returns information about hot threads on each node in the cluster.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cluster-nodes-hot-threads.html>`_
+
+        :arg node_id: A comma-separated list of node IDs or names to
+            limit the returned information; use `_local` to return information from
+            the node you're connecting to, leave empty to get information from all
+            nodes
+        :arg doc_type: The type to sample (default: cpu)  Valid choices:
+            cpu, wait, block
+        :arg ignore_idle_threads: Don't show threads that are in
+            known-idle places, such as waiting on a socket select or pulling
+            from an empty task queue (default: true)
+        :arg interval: The interval for the second sampling of threads
+        :arg snapshots: Number of samples of thread stacktrace (default:
+            10)
+        :arg threads: Specify the number of threads to provide
+            information for (default: 3)
+        :arg timeout: Explicit operation timeout
+        """
+        # type is a reserved word so it cannot be used, use doc_type instead
+        if "doc_type" in params:
+            params["type"] = params.pop("doc_type")
+
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_nodes", node_id, "hot_threads"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("timeout")
+    def usage(self, node_id=None, metric=None, params=None, headers=None):
+        """
+        Returns low-level information about REST actions usage on nodes.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cluster-nodes-usage.html>`_
+
+        :arg node_id: A comma-separated list of node IDs or names to
+            limit the returned information; use `_local` to return information from
+            the node you're connecting to, leave empty to get information from all
+            nodes
+        :arg metric: Limit the information returned to the specified
+            metrics  Valid choices: _all, rest_actions
+        :arg timeout: Explicit operation timeout
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_nodes", node_id, "usage", metric),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "completion_fields",
+        "fielddata_fields",
+        "fields",
+        "groups",
+        "include_segment_file_sizes",
+        "level",
+        "timeout",
+        "types",
+    )
+    def stats(
+        self, node_id=None, metric=None, index_metric=None, params=None, headers=None
+    ):
+        """
+        Returns statistical information about nodes in the cluster.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cluster-nodes-stats.html>`_
+
+        :arg node_id: A comma-separated list of node IDs or names to
+            limit the returned information; use `_local` to return information from
+            the node you're connecting to, leave empty to get information from all
+            nodes
+        :arg metric: Limit the information returned to the specified
+            metrics  Valid choices: _all, breaker, fs, http, indices, jvm, os,
+            process, thread_pool, transport, discovery
+        :arg index_metric: Limit the information returned for `indices`
+            metric to the specific index metrics. Isn't used if `indices` (or `all`)
+            metric isn't specified.  Valid choices: _all, completion, docs,
+            fielddata, query_cache, flush, get, indexing, merge, request_cache,
+            refresh, search, segments, store, warmer, suggest
+        :arg completion_fields: A comma-separated list of fields for
+            `fielddata` and `suggest` index metric (supports wildcards)
+        :arg fielddata_fields: A comma-separated list of fields for
+            `fielddata` index metric (supports wildcards)
+        :arg fields: A comma-separated list of fields for `fielddata`
+            and `completion` index metric (supports wildcards)
+        :arg groups: A comma-separated list of search groups for
+            `search` index metric
+        :arg include_segment_file_sizes: Whether to report the
+            aggregated disk usage of each one of the Lucene index files (only
+            applies if segment stats are requested)
+        :arg level: Return indices stats aggregated at index, node or
+            shard level  Valid choices: indices, node, shards  Default: node
+        :arg timeout: Explicit operation timeout
+        :arg types: A comma-separated list of document types for the
+            `indexing` index metric
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_nodes", node_id, "stats", metric, index_metric),
+            params=params,
+            headers=headers,
+        )
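A short sketch of the node APIs above; node filters and metric names follow the docstrings, and the cluster address is a placeholder.

```python
from elasticsearch_7 import Elasticsearch

es = Elasticsearch("http://localhost:9200")

# Basic info for every node, then JVM/OS details for the local node only.
everything = es.nodes.info()
local_jvm = es.nodes.info(node_id="_local", metric="jvm,os")

# Hot threads come back as plain text rather than JSON.
print(es.nodes.hot_threads(node_id="_local", threads=5))

# Indices-level stats restricted to the docs and store metrics.
stats = es.nodes.stats(metric="indices", index_metric="docs,store")
for node_id, node in stats["nodes"].items():
    print(node_id, node["indices"]["docs"]["count"])
```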
diff --git a/elasticsearch_7/client/remote.py b/elasticsearch_7/client/remote.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c2767b1dfc6ecc89100b32bbbd435bac6f8ab4a
--- /dev/null
+++ b/elasticsearch_7/client/remote.py
@@ -0,0 +1,16 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params
+
+
+class RemoteClient(NamespacedClient):
+    @query_params()
+    def info(self, params=None, headers=None):
+        """
+        Returns the information about configured remote clusters.
+        `<http://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-remote-info.html>`_
+        """
+        return self.transport.perform_request(
+            "GET", "/_remote/info", params=params, headers=headers
+        )
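Usage is a one-liner; the response is keyed by remote-cluster alias (sketch, assuming the namespaced client is wired up as `es.remote`).

```python
from elasticsearch_7 import Elasticsearch

es = Elasticsearch("http://localhost:9200")

for alias, info in es.remote.info().items():
    print(alias, info.get("connected"), info.get("seeds"))
```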
diff --git a/elasticsearch_7/client/rollup.py b/elasticsearch_7/client/rollup.py
new file mode 100644
index 0000000000000000000000000000000000000000..acd7abd976f403d1b240d011122b9ef49cc96f6b
--- /dev/null
+++ b/elasticsearch_7/client/rollup.py
@@ -0,0 +1,155 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class RollupClient(NamespacedClient):
+    @query_params()
+    def delete_job(self, id, params=None, headers=None):
+        """
+        Deletes an existing rollup job.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/rollup-delete-job.html>`_
+
+        :arg id: The ID of the job to delete
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'id'.")
+
+        return self.transport.perform_request(
+            "DELETE", _make_path("_rollup", "job", id), params=params, headers=headers
+        )
+
+    @query_params()
+    def get_jobs(self, id=None, params=None, headers=None):
+        """
+        Retrieves the configuration, stats, and status of rollup jobs.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/rollup-get-job.html>`_
+
+        :arg id: The ID of the job(s) to fetch. Accepts glob patterns;
+            leave blank for all jobs
+        """
+        return self.transport.perform_request(
+            "GET", _make_path("_rollup", "job", id), params=params, headers=headers
+        )
+
+    @query_params()
+    def get_rollup_caps(self, id=None, params=None, headers=None):
+        """
+        Returns the capabilities of any rollup jobs that have been configured for a
+        specific index or index pattern.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/rollup-get-rollup-caps.html>`_
+
+        :arg id: The ID of the index to check rollup capabilities on, or
+            left blank for all jobs
+        """
+        return self.transport.perform_request(
+            "GET", _make_path("_rollup", "data", id), params=params, headers=headers
+        )
+
+    @query_params()
+    def get_rollup_index_caps(self, index, params=None, headers=None):
+        """
+        Returns the rollup capabilities of all jobs inside of a rollup index (e.g. the
+        index where rollup data is stored).
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/rollup-get-rollup-index-caps.html>`_
+
+        :arg index: The rollup index or index pattern to obtain rollup
+            capabilities from.
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return self.transport.perform_request(
+            "GET", _make_path(index, "_rollup", "data"), params=params, headers=headers
+        )
+
+    @query_params()
+    def put_job(self, id, body, params=None, headers=None):
+        """
+        Creates a rollup job.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/rollup-put-job.html>`_
+
+        :arg id: The ID of the job to create
+        :arg body: The job configuration
+        """
+        for param in (id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_rollup", "job", id),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("rest_total_hits_as_int", "typed_keys")
+    def rollup_search(self, index, body, doc_type=None, params=None, headers=None):
+        """
+        Enables searching rolled-up data using the standard query DSL.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/rollup-search.html>`_
+
+        :arg index: The indices or index-pattern(s) (containing rollup
+            or regular data) that should be searched
+        :arg body: The search request body
+        :arg doc_type: The doc type inside the index
+        :arg rest_total_hits_as_int: Indicates whether hits.total should
+            be rendered as an integer or an object in the rest search response
+        :arg typed_keys: Specify whether aggregation and suggester names
+            should be prefixed by their respective types in the response
+        """
+        for param in (index, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path(index, doc_type, "_rollup_search"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def start_job(self, id, params=None, headers=None):
+        """
+        Starts an existing, stopped rollup job.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/rollup-start-job.html>`_
+
+        :arg id: The ID of the job to start
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'id'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_rollup", "job", id, "_start"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("timeout", "wait_for_completion")
+    def stop_job(self, id, params=None, headers=None):
+        """
+        Stops an existing, started rollup job.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/rollup-stop-job.html>`_
+
+        :arg id: The ID of the job to stop
+        :arg timeout: Block for (at maximum) the specified duration
+            while waiting for the job to stop.  Defaults to 30s.
+        :arg wait_for_completion: True if the API should block until the
+        :arg wait_for_completion: True if the API should block until the
+            job has fully stopped, false if it should execute asynchronously.
+            Defaults to false.
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'id'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_rollup", "job", id, "_stop"),
+            params=params,
+            headers=headers,
+        )
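A sketch of the typical rollup lifecycle using the methods above; the job configuration and index names are illustrative only.

```python
from elasticsearch_7 import Elasticsearch

es = Elasticsearch("http://localhost:9200")

# Hypothetical job that rolls hourly sensor readings up into daily buckets.
job_config = {
    "index_pattern": "sensor-*",
    "rollup_index": "sensor_rollup",
    "cron": "0 0 2 * * ?",
    "page_size": 1000,
    "groups": {
        "date_histogram": {"field": "timestamp", "fixed_interval": "1d"},
        "terms": {"fields": ["node"]},
    },
    "metrics": [{"field": "temperature", "metrics": ["min", "max", "avg"]}],
}

es.rollup.put_job(id="sensor_daily", body=job_config)
es.rollup.start_job(id="sensor_daily")

# Rolled-up data is queried with the normal search DSL.
resp = es.rollup.rollup_search(
    index="sensor_rollup",
    body={"size": 0, "aggs": {"max_temp": {"max": {"field": "temperature"}}}},
)
print(resp["aggregations"]["max_temp"]["value"])

es.rollup.stop_job(id="sensor_daily", wait_for_completion=True)
```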
diff --git a/elasticsearch_7/client/searchable_snapshots.py b/elasticsearch_7/client/searchable_snapshots.py
new file mode 100644
index 0000000000000000000000000000000000000000..473143c2d516b41fdd039954a064c62e76171e52
--- /dev/null
+++ b/elasticsearch_7/client/searchable_snapshots.py
@@ -0,0 +1,92 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class SearchableSnapshotsClient(NamespacedClient):
+    @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable")
+    def clear_cache(self, index=None, params=None, headers=None):
+        """
+        Clear the cache of searchable snapshots.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/searchable-snapshots-api-clear-cache.html>`_
+
+        :arg index: A comma-separated list of index names to limit the
+            operation
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.  Valid choices: open,
+            closed, none, all  Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        """
+        return self.transport.perform_request(
+            "POST",
+            _make_path(index, "_searchable_snapshots", "cache", "clear"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("master_timeout", "wait_for_completion")
+    def mount(self, repository, snapshot, body, params=None, headers=None):
+        """
+        Mount a snapshot as a searchable index.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/searchable-snapshots-api-mount-snapshot.html>`_
+
+        :arg repository: The name of the repository containing the
+            snapshot of the index to mount
+        :arg snapshot: The name of the snapshot of the index to mount
+        :arg body: The restore configuration for mounting the snapshot
+            as searchable
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg wait_for_completion: Should this request wait until the
+            operation has completed before returning
+        """
+        for param in (repository, snapshot, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_snapshot", repository, snapshot, "_mount"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def repository_stats(self, repository, params=None, headers=None):
+        """
+        Retrieve usage statistics about a snapshot repository.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/searchable-snapshots-repository-stats.html>`_
+
+        :arg repository: The repository for which to get the stats
+        """
+        if repository in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'repository'.")
+
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_snapshot", repository, "_stats"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def stats(self, index=None, params=None, headers=None):
+        """
+        Retrieve various statistics about searchable snapshots.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/searchable-snapshots-api-stats.html>`_
+
+        :arg index: A comma-separated list of index names
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path(index, "_searchable_snapshots", "stats"),
+            params=params,
+            headers=headers,
+        )
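A sketch of mounting an index out of an existing snapshot with the client above; repository, snapshot and index names are hypothetical.

```python
from elasticsearch_7 import Elasticsearch

es = Elasticsearch("http://localhost:9200")

# Mount an index from a snapshot as a read-only, searchable index.
es.searchable_snapshots.mount(
    repository="my_backups",
    snapshot="nightly-2020-06-01",
    body={
        "index": "web-logs-2020.05",                   # index inside the snapshot
        "renamed_index": "web-logs-2020.05-mounted",   # optional new name
    },
    wait_for_completion=True,
)

# Per-index statistics and cache maintenance for mounted indices.
print(es.searchable_snapshots.stats(index="web-logs-2020.05-mounted"))
es.searchable_snapshots.clear_cache(index="web-logs-2020.05-mounted")
```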
diff --git a/elasticsearch_7/client/security.py b/elasticsearch_7/client/security.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa3b22def4a05b9e301e211589c13eeefe4308e3
--- /dev/null
+++ b/elasticsearch_7/client/security.py
@@ -0,0 +1,497 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class SecurityClient(NamespacedClient):
+    @query_params()
+    def authenticate(self, params=None, headers=None):
+        """
+        Enables authentication as a user and retrieves information about the
+        authenticated user.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-authenticate.html>`_
+        """
+        return self.transport.perform_request(
+            "GET", "/_security/_authenticate", params=params, headers=headers
+        )
+
+    @query_params("refresh")
+    def change_password(self, body, username=None, params=None, headers=None):
+        """
+        Changes the passwords of users in the native realm and built-in users.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-change-password.html>`_
+
+        :arg body: the new password for the user
+        :arg username: The username of the user to change the password
+            for
+        :arg refresh: If `true` (the default) then refresh the affected
+            shards to make this operation visible to search, if `wait_for` then wait
+            for a refresh to make this operation visible to search, if `false` then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_security", "user", username, "_password"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("usernames")
+    def clear_cached_realms(self, realms, params=None, headers=None):
+        """
+        Evicts users from the user cache. Can completely clear the cache or evict
+        specific users.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-clear-cache.html>`_
+
+        :arg realms: Comma-separated list of realms to clear
+        :arg usernames: Comma-separated list of usernames to clear from
+            the cache
+        """
+        if realms in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'realms'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_security", "realm", realms, "_clear_cache"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def clear_cached_roles(self, name, params=None, headers=None):
+        """
+        Evicts roles from the native role cache.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-clear-role-cache.html>`_
+
+        :arg name: Role name
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_security", "role", name, "_clear_cache"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("refresh")
+    def create_api_key(self, body, params=None, headers=None):
+        """
+        Creates an API key for access without requiring basic authentication.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-create-api-key.html>`_
+
+        :arg body: The api key request to create an API key
+        :arg refresh: If `true` (the default) then refresh the affected
+            shards to make this operation visible to search, if `wait_for` then wait
+            for a refresh to make this operation visible to search, if `false` then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return self.transport.perform_request(
+            "PUT", "/_security/api_key", params=params, headers=headers, body=body
+        )
+
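A sketch of the API-key round trip using `create_api_key` above together with the `get_api_key` and `invalidate_api_key` methods further down; the credentials, key name and role descriptor are placeholders.

```python
from elasticsearch_7 import Elasticsearch

es = Elasticsearch("http://localhost:9200", http_auth=("elastic", "changeme"))

# Create a key limited to reading a hypothetical logs index, expiring in a day.
created = es.security.create_api_key(
    body={
        "name": "logs-reader",
        "expiration": "1d",
        "role_descriptors": {
            "logs_read_only": {
                "indices": [{"names": ["logs-*"], "privileges": ["read"]}]
            }
        },
    }
)
print(created["id"], created["api_key"])

# Look the key up again, then revoke it.
es.security.get_api_key(id=created["id"])
es.security.invalidate_api_key(body={"id": created["id"]})
```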
+    @query_params("refresh")
+    def delete_privileges(self, application, name, params=None, headers=None):
+        """
+        Removes application privileges.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-delete-privilege.html>`_
+
+        :arg application: Application name
+        :arg name: Privilege name
+        :arg refresh: If `true` (the default) then refresh the affected
+            shards to make this operation visible to search, if `wait_for` then wait
+            for a refresh to make this operation visible to search, if `false` then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        """
+        for param in (application, name):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "DELETE",
+            _make_path("_security", "privilege", application, name),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("refresh")
+    def delete_role(self, name, params=None, headers=None):
+        """
+        Removes roles in the native realm.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-delete-role.html>`_
+
+        :arg name: Role name
+        :arg refresh: If `true` (the default) then refresh the affected
+            shards to make this operation visible to search, if `wait_for` then wait
+            for a refresh to make this operation visible to search, if `false` then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return self.transport.perform_request(
+            "DELETE",
+            _make_path("_security", "role", name),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("refresh")
+    def delete_role_mapping(self, name, params=None, headers=None):
+        """
+        Removes role mappings.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-delete-role-mapping.html>`_
+
+        :arg name: Role-mapping name
+        :arg refresh: If `true` (the default) then refresh the affected
+            shards to make this operation visible to search, if `wait_for` then wait
+            for a refresh to make this operation visible to search, if `false` then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return self.transport.perform_request(
+            "DELETE",
+            _make_path("_security", "role_mapping", name),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("refresh")
+    def delete_user(self, username, params=None, headers=None):
+        """
+        Deletes users from the native realm.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-delete-user.html>`_
+
+        :arg username: username
+        :arg refresh: If `true` (the default) then refresh the affected
+            shards to make this operation visible to search, if `wait_for` then wait
+            for a refresh to make this operation visible to search, if `false` then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        """
+        if username in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'username'.")
+
+        return self.transport.perform_request(
+            "DELETE",
+            _make_path("_security", "user", username),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("refresh")
+    def disable_user(self, username, params=None, headers=None):
+        """
+        Disables users in the native realm.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-disable-user.html>`_
+
+        :arg username: The username of the user to disable
+        :arg refresh: If `true` (the default) then refresh the affected
+            shards to make this operation visible to search, if `wait_for` then wait
+            for a refresh to make this operation visible to search, if `false` then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        """
+        if username in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'username'.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_security", "user", username, "_disable"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("refresh")
+    def enable_user(self, username, params=None, headers=None):
+        """
+        Enables users in the native realm.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-enable-user.html>`_
+
+        :arg username: The username of the user to enable
+        :arg refresh: If `true` (the default) then refresh the affected
+            shards to make this operation visible to search, if `wait_for` then wait
+            for a refresh to make this operation visible to search, if `false` then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        """
+        if username in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'username'.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_security", "user", username, "_enable"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("id", "name", "owner", "realm_name", "username")
+    def get_api_key(self, params=None, headers=None):
+        """
+        Retrieves information for one or more API keys.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-get-api-key.html>`_
+
+        :arg id: API key id of the API key to be retrieved
+        :arg name: API key name of the API key to be retrieved
+        :arg owner: flag to query API keys owned by the currently
+            authenticated user
+        :arg realm_name: realm name of the user who created this API key
+            to be retrieved
+        :arg username: user name of the user who created this API key to
+            be retrieved
+        """
+        return self.transport.perform_request(
+            "GET", "/_security/api_key", params=params, headers=headers
+        )
+
+    @query_params()
+    def get_privileges(self, application=None, name=None, params=None, headers=None):
+        """
+        Retrieves application privileges.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-get-privileges.html>`_
+
+        :arg application: Application name
+        :arg name: Privilege name
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_security", "privilege", application, name),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def get_role(self, name=None, params=None, headers=None):
+        """
+        Retrieves roles in the native realm.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-get-role.html>`_
+
+        :arg name: Role name
+        """
+        return self.transport.perform_request(
+            "GET", _make_path("_security", "role", name), params=params, headers=headers
+        )
+
+    @query_params()
+    def get_role_mapping(self, name=None, params=None, headers=None):
+        """
+        Retrieves role mappings.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-get-role-mapping.html>`_
+
+        :arg name: Role-Mapping name
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_security", "role_mapping", name),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def get_token(self, body, params=None, headers=None):
+        """
+        Creates a bearer token for access without requiring basic authentication.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-get-token.html>`_
+
+        :arg body: The token request to get
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return self.transport.perform_request(
+            "POST", "/_security/oauth2/token", params=params, headers=headers, body=body
+        )
+
+    @query_params()
+    def get_user(self, username=None, params=None, headers=None):
+        """
+        Retrieves information about users in the native realm and built-in users.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-get-user.html>`_
+
+        :arg username: A comma-separated list of usernames
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_security", "user", username),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def get_user_privileges(self, params=None, headers=None):
+        """
+        Retrieves the security privileges of the currently authenticated user.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-get-privileges.html>`_
+        """
+        return self.transport.perform_request(
+            "GET", "/_security/user/_privileges", params=params, headers=headers
+        )
+
+    @query_params()
+    def has_privileges(self, body, user=None, params=None, headers=None):
+        """
+        Determines whether the specified user has a specified list of privileges.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-has-privileges.html>`_
+
+        :arg body: The privileges to test
+        :arg user: Username
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_security", "user", user, "_has_privileges"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def invalidate_api_key(self, body, params=None, headers=None):
+        """
+        Invalidates one or more API keys.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-invalidate-api-key.html>`_
+
+        :arg body: The api key request to invalidate API key(s)
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return self.transport.perform_request(
+            "DELETE", "/_security/api_key", params=params, headers=headers, body=body
+        )
+
+    @query_params()
+    def invalidate_token(self, body, params=None, headers=None):
+        """
+        Invalidates one or more access tokens or refresh tokens.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-invalidate-token.html>`_
+
+        :arg body: The token to invalidate
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return self.transport.perform_request(
+            "DELETE",
+            "/_security/oauth2/token",
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("refresh")
+    def put_privileges(self, body, params=None, headers=None):
+        """
+        Adds or updates application privileges.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-put-privileges.html>`_
+
+        :arg body: The privilege(s) to add
+        :arg refresh: If `true` (the default) then refresh the affected
+            shards to make this operation visible to search, if `wait_for` then wait
+            for a refresh to make this operation visible to search, if `false` then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return self.transport.perform_request(
+            "PUT", "/_security/privilege/", params=params, headers=headers, body=body
+        )
+
+    @query_params("refresh")
+    def put_role(self, name, body, params=None, headers=None):
+        """
+        Adds and updates roles in the native realm.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-put-role.html>`_
+
+        :arg name: Role name
+        :arg body: The role to add
+        :arg refresh: If `true` (the default) then refresh the affected
+            shards to make this operation visible to search, if `wait_for` then wait
+            for a refresh to make this operation visible to search, if `false` then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        """
+        for param in (name, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_security", "role", name),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("refresh")
+    def put_role_mapping(self, name, body, params=None, headers=None):
+        """
+        Creates and updates role mappings.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-put-role-mapping.html>`_
+
+        :arg name: Role-mapping name
+        :arg body: The role mapping to add
+        :arg refresh: If `true` (the default) then refresh the affected
+            shards to make this operation visible to search, if `wait_for` then wait
+            for a refresh to make this operation visible to search, if `false` then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        """
+        for param in (name, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_security", "role_mapping", name),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("refresh")
+    def put_user(self, username, body, params=None, headers=None):
+        """
+        Adds and updates users in the native realm. These users are commonly referred
+        to as native users.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-put-user.html>`_
+
+        :arg username: The username of the User
+        :arg body: The user to add
+        :arg refresh: If `true` (the default) then refresh the affected
+            shards to make this operation visible to search, if `wait_for` then wait
+            for a refresh to make this operation visible to search, if `false` then
+            do nothing with refreshes.  Valid choices: true, false, wait_for
+        """
+        for param in (username, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_security", "user", username),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def get_builtin_privileges(self, params=None, headers=None):
+        """
+        Retrieves the list of cluster privileges and index privileges that are
+        available in this version of Elasticsearch.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-get-builtin-privileges.html>`_
+        """
+        return self.transport.perform_request(
+            "GET", "/_security/privilege/_builtin", params=params, headers=headers
+        )
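A sketch of the role and user management calls above; the role definition, username and password are placeholders, and the request is assumed to run as a user allowed to manage security.

```python
from elasticsearch_7 import Elasticsearch

es = Elasticsearch("http://localhost:9200", http_auth=("elastic", "changeme"))

# Define a role, create a user holding it, and check the caller's privileges.
es.security.put_role(
    name="metrics_writer",
    body={
        "indices": [
            {"names": ["metrics-*"], "privileges": ["create_doc", "create_index"]}
        ]
    },
)
es.security.put_user(
    username="metrics_agent",
    body={"password": "a-long-random-password", "roles": ["metrics_writer"]},
)

# Without a `user` argument, has_privileges checks the authenticated caller.
check = es.security.has_privileges(
    body={"index": [{"names": ["metrics-2020.06"], "privileges": ["create_doc"]}]}
)
print(check["has_all_requested"])
```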
diff --git a/elasticsearch_7/client/slm.py b/elasticsearch_7/client/slm.py
new file mode 100644
index 0000000000000000000000000000000000000000..368e5ac0b5563f1b2eb14e7079b8f4d49c80b7c5
--- /dev/null
+++ b/elasticsearch_7/client/slm.py
@@ -0,0 +1,135 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class SlmClient(NamespacedClient):
+    @query_params()
+    def delete_lifecycle(self, policy_id, params=None, headers=None):
+        """
+        Deletes an existing snapshot lifecycle policy.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/slm-api-delete-policy.html>`_
+
+        :arg policy_id: The id of the snapshot lifecycle policy to
+            remove
+        """
+        if policy_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'policy_id'.")
+
+        return self.transport.perform_request(
+            "DELETE",
+            _make_path("_slm", "policy", policy_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def execute_lifecycle(self, policy_id, params=None, headers=None):
+        """
+        Immediately creates a snapshot according to the lifecycle policy, without
+        waiting for the scheduled time.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/slm-api-execute-lifecycle.html>`_
+
+        :arg policy_id: The id of the snapshot lifecycle policy to be
+            executed
+        """
+        if policy_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'policy_id'.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_slm", "policy", policy_id, "_execute"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def execute_retention(self, params=None, headers=None):
+        """
+        Deletes any snapshots that are expired according to the policy's retention
+        rules.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/slm-api-execute-retention.html>`_
+        """
+        return self.transport.perform_request(
+            "POST", "/_slm/_execute_retention", params=params, headers=headers
+        )
+
+    @query_params()
+    def get_lifecycle(self, policy_id=None, params=None, headers=None):
+        """
+        Retrieves one or more snapshot lifecycle policy definitions and information
+        about the latest snapshot attempts.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/slm-api-get-policy.html>`_
+
+        :arg policy_id: Comma-separated list of snapshot lifecycle
+            policies to retrieve
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_slm", "policy", policy_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def get_stats(self, params=None, headers=None):
+        """
+        Returns global and policy-level statistics about actions taken by snapshot
+        lifecycle management.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/slm-api-get-stats.html>`_
+        """
+        return self.transport.perform_request(
+            "GET", "/_slm/stats", params=params, headers=headers
+        )
+
+    @query_params()
+    def put_lifecycle(self, policy_id, body=None, params=None, headers=None):
+        """
+        Creates or updates a snapshot lifecycle policy.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/slm-api-put-policy.html>`_
+
+        :arg policy_id: The id of the snapshot lifecycle policy
+        :arg body: The snapshot lifecycle policy definition to register
+        """
+        if policy_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'policy_id'.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_slm", "policy", policy_id),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def get_status(self, params=None, headers=None):
+        """
+        Retrieves the status of snapshot lifecycle management (SLM).
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/slm-api-get-status.html>`_
+        """
+        return self.transport.perform_request(
+            "GET", "/_slm/status", params=params, headers=headers
+        )
+
+    @query_params()
+    def start(self, params=None, headers=None):
+        """
+        Turns on snapshot lifecycle management (SLM).
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/slm-api-start.html>`_
+        """
+        return self.transport.perform_request(
+            "POST", "/_slm/start", params=params, headers=headers
+        )
+
+    @query_params()
+    def stop(self, params=None, headers=None):
+        """
+        Turns off snapshot lifecycle management (SLM).
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/slm-api-stop.html>`_
+        """
+        return self.transport.perform_request(
+            "POST", "/_slm/stop", params=params, headers=headers
+        )
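A sketch of driving SLM through the client above; the policy and repository names are placeholders and the repository is assumed to already exist.

```python
from elasticsearch_7 import Elasticsearch

es = Elasticsearch("http://localhost:9200")

# Nightly snapshot policy against a pre-registered repository.
es.slm.put_lifecycle(
    policy_id="nightly-snapshots",
    body={
        "schedule": "0 30 1 * * ?",        # 01:30 every night
        "name": "<nightly-{now/d}>",       # date-math snapshot name
        "repository": "my_backups",
        "config": {"indices": ["*"]},
        "retention": {"expire_after": "30d", "min_count": 5, "max_count": 50},
    },
)

# Trigger the policy immediately instead of waiting for the schedule,
# then look at the cumulative SLM statistics.
es.slm.execute_lifecycle(policy_id="nightly-snapshots")
print(es.slm.get_stats())
```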
diff --git a/elasticsearch_7/client/snapshot.py b/elasticsearch_7/client/snapshot.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f896dac188ea60413190a5f4d9cab0cf60bcde0
--- /dev/null
+++ b/elasticsearch_7/client/snapshot.py
@@ -0,0 +1,234 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class SnapshotClient(NamespacedClient):
+    @query_params("master_timeout", "wait_for_completion")
+    def create(self, repository, snapshot, body=None, params=None, headers=None):
+        """
+        Creates a snapshot in a repository.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/modules-snapshots.html>`_
+
+        :arg repository: A repository name
+        :arg snapshot: A snapshot name
+        :arg body: The snapshot definition
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg wait_for_completion: Should this request wait until the
+            operation has completed before returning
+        """
+        for param in (repository, snapshot):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_snapshot", repository, snapshot),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("master_timeout")
+    def delete(self, repository, snapshot, params=None, headers=None):
+        """
+        Deletes a snapshot.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/modules-snapshots.html>`_
+
+        :arg repository: A repository name
+        :arg snapshot: A snapshot name
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        """
+        for param in (repository, snapshot):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "DELETE",
+            _make_path("_snapshot", repository, snapshot),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("ignore_unavailable", "master_timeout", "verbose")
+    def get(self, repository, snapshot, params=None, headers=None):
+        """
+        Returns information about a snapshot.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/modules-snapshots.html>`_
+
+        :arg repository: A repository name
+        :arg snapshot: A comma-separated list of snapshot names
+        :arg ignore_unavailable: Whether to ignore unavailable
+            snapshots, defaults to false which means a SnapshotMissingException is
+            thrown
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg verbose: Whether to show verbose snapshot info or only show
+            the basic info found in the repository index blob
+        """
+        for param in (repository, snapshot):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_snapshot", repository, snapshot),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("master_timeout", "timeout")
+    def delete_repository(self, repository, params=None, headers=None):
+        """
+        Deletes a repository.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/modules-snapshots.html>`_
+
+        :arg repository: Name of the snapshot repository to unregister.
+            Wildcard (`*`) patterns are supported.
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg timeout: Explicit operation timeout
+        """
+        if repository in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'repository'.")
+
+        return self.transport.perform_request(
+            "DELETE",
+            _make_path("_snapshot", repository),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("local", "master_timeout")
+    def get_repository(self, repository=None, params=None, headers=None):
+        """
+        Returns information about a repository.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/modules-snapshots.html>`_
+
+        :arg repository: A comma-separated list of repository names
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        """
+        return self.transport.perform_request(
+            "GET", _make_path("_snapshot", repository), params=params, headers=headers
+        )
+
+    @query_params("master_timeout", "timeout", "verify")
+    def create_repository(self, repository, body, params=None, headers=None):
+        """
+        Creates a repository.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/modules-snapshots.html>`_
+
+        :arg repository: A repository name
+        :arg body: The repository definition
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg timeout: Explicit operation timeout
+        :arg verify: Whether to verify the repository after creation
+        """
+        for param in (repository, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_snapshot", repository),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("master_timeout", "wait_for_completion")
+    def restore(self, repository, snapshot, body=None, params=None, headers=None):
+        """
+        Restores a snapshot.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/modules-snapshots.html>`_
+
+        :arg repository: A repository name
+        :arg snapshot: A snapshot name
+        :arg body: Details of what to restore
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg wait_for_completion: Should this request wait until the
+            operation has completed before returning
+        """
+        for param in (repository, snapshot):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_snapshot", repository, snapshot, "_restore"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("ignore_unavailable", "master_timeout")
+    def status(self, repository=None, snapshot=None, params=None, headers=None):
+        """
+        Returns information about the status of a snapshot.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/modules-snapshots.html>`_
+
+        :arg repository: A repository name
+        :arg snapshot: A comma-separated list of snapshot names
+        :arg ignore_unavailable: Whether to ignore unavailable
+            snapshots, defaults to false which means a SnapshotMissingException is
+            thrown
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_snapshot", repository, snapshot, "_status"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("master_timeout", "timeout")
+    def verify_repository(self, repository, params=None, headers=None):
+        """
+        Verifies a repository.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/modules-snapshots.html>`_
+
+        :arg repository: A repository name
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg timeout: Explicit operation timeout
+        """
+        if repository in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'repository'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_snapshot", repository, "_verify"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("master_timeout", "timeout")
+    def cleanup_repository(self, repository, params=None, headers=None):
+        """
+        Removes stale data from a repository.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/clean-up-snapshot-repo-api.html>`_
+
+        :arg repository: A repository name
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg timeout: Explicit operation timeout
+        """
+        if repository in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'repository'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_snapshot", repository, "_cleanup"),
+            params=params,
+            headers=headers,
+        )
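The restore/status/verify/cleanup methods above compose into a typical backup workflow. A minimal usage sketch, assuming a node on localhost:9200 and a registered repository named "my_backup" that already contains a snapshot "snap_1" (both names are illustrative only):

    from elasticsearch_7 import Elasticsearch

    client = Elasticsearch(["localhost:9200"])

    # Sanity-check that the nodes can reach the repository.
    client.snapshot.verify_repository(repository="my_backup")

    # Inspect the state of a specific snapshot.
    status = client.snapshot.status(repository="my_backup", snapshot="snap_1")

    # Restore selected indices and block until the restore completes.
    client.snapshot.restore(
        repository="my_backup",
        snapshot="snap_1",
        body={"indices": "logs-*", "include_global_state": False},
        wait_for_completion=True,
    )

    # Remove stale data left behind by deleted snapshots.
    client.snapshot.cleanup_repository(repository="my_backup")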
diff --git a/elasticsearch_7/client/sql.py b/elasticsearch_7/client/sql.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f8c18f79ce55028017467c5aca333308a310b4a
--- /dev/null
+++ b/elasticsearch_7/client/sql.py
@@ -0,0 +1,56 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, SKIP_IN_PATH
+
+
+class SqlClient(NamespacedClient):
+    @query_params()
+    def clear_cursor(self, body, params=None, headers=None):
+        """
+        Clears the SQL cursor
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/sql-pagination.html>`_
+
+        :arg body: Specify the cursor value in the `cursor` element to
+            clean the cursor.
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return self.transport.perform_request(
+            "POST", "/_sql/close", params=params, headers=headers, body=body
+        )
+
+    @query_params("format")
+    def query(self, body, params=None, headers=None):
+        """
+        Executes a SQL request
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/sql-rest-overview.html>`_
+
+        :arg body: Use the `query` element to start a query. Use the
+            `cursor` element to continue a query.
+        :arg format: a short version of the Accept header, e.g. json,
+            yaml
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return self.transport.perform_request(
+            "POST", "/_sql", params=params, headers=headers, body=body
+        )
+
+    @query_params()
+    def translate(self, body, params=None, headers=None):
+        """
+        Translates SQL into Elasticsearch queries
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/sql-translate.html>`_
+
+        :arg body: Specify the query in the `query` element.
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return self.transport.perform_request(
+            "POST", "/_sql/translate", params=params, headers=headers, body=body
+        )
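A short sketch of the SQL flow defined above (query, cursor continuation, cursor cleanup, translate), assuming an index named "library" exists; the index name and fetch size are illustrative:

    from elasticsearch_7 import Elasticsearch

    client = Elasticsearch(["localhost:9200"])

    # Start a SQL query; fetch_size controls the page size.
    page = client.sql.query(
        body={"query": "SELECT author, name FROM library ORDER BY name", "fetch_size": 100}
    )

    # Continue with the returned cursor, then release it server-side.
    cursor = page.get("cursor")
    if cursor:
        client.sql.query(body={"cursor": cursor})
        client.sql.clear_cursor(body={"cursor": cursor})

    # Show the query DSL that a SQL statement translates to.
    print(client.sql.translate(body={"query": "SELECT COUNT(*) FROM library"}))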
diff --git a/elasticsearch_7/client/ssl.py b/elasticsearch_7/client/ssl.py
new file mode 100644
index 0000000000000000000000000000000000000000..23260f3fbef5f3e139d1e14be5a2a771b4294470
--- /dev/null
+++ b/elasticsearch_7/client/ssl.py
@@ -0,0 +1,18 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params
+
+
+class SslClient(NamespacedClient):
+    @query_params()
+    def certificates(self, params=None, headers=None):
+        """
+        Retrieves information about the X.509 certificates used to encrypt
+        communications in the cluster.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/security-api-ssl.html>`_
+        """
+        return self.transport.perform_request(
+            "GET", "/_ssl/certificates", params=params, headers=headers
+        )
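A one-call sketch for the endpoint above, on a security-enabled cluster; the response fields shown (path, expiry) follow the documented certificates API and are read defensively here:

    from elasticsearch_7 import Elasticsearch

    client = Elasticsearch(["localhost:9200"])

    # List the X.509 certificates the cluster uses for TLS.
    for cert in client.ssl.certificates():
        print(cert.get("path"), cert.get("expiry"))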
diff --git a/elasticsearch_7/client/tasks.py b/elasticsearch_7/client/tasks.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c9ba2c5c37d5e3b53ab614e8ab0d2ab6194b86e
--- /dev/null
+++ b/elasticsearch_7/client/tasks.py
@@ -0,0 +1,89 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+import warnings
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class TasksClient(NamespacedClient):
+    @query_params(
+        "actions",
+        "detailed",
+        "group_by",
+        "nodes",
+        "parent_task_id",
+        "timeout",
+        "wait_for_completion",
+    )
+    def list(self, params=None, headers=None):
+        """
+        Returns a list of tasks.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/tasks.html>`_
+
+        :arg actions: A comma-separated list of actions that should be
+            returned. Leave empty to return all.
+        :arg detailed: Return detailed task information (default: false)
+        :arg group_by: Group tasks by nodes or parent/child
+            relationships  Valid choices: nodes, parents, none  Default: nodes
+        :arg nodes: A comma-separated list of node IDs or names to limit
+            the returned information; use `_local` to return information from the
+            node you're connecting to, leave empty to get information from all nodes
+        :arg parent_task_id: Return tasks with specified parent task id
+            (node_id:task_number). Set to -1 to return all.
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_completion: Wait for the matching tasks to
+            complete (default: false)
+        """
+        return self.transport.perform_request(
+            "GET", "/_tasks", params=params, headers=headers
+        )
+
+    @query_params("actions", "nodes", "parent_task_id", "wait_for_completion")
+    def cancel(self, task_id=None, params=None, headers=None):
+        """
+        Cancels a task, if it can be cancelled through an API.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/tasks.html>`_
+
+        :arg task_id: Cancel the task with specified task id
+            (node_id:task_number)
+        :arg actions: A comma-separated list of actions that should be
+            cancelled. Leave empty to cancel all.
+        :arg nodes: A comma-separated list of node IDs or names to limit
+            the returned information; use `_local` to return information from the
+            node you're connecting to, leave empty to get information from all nodes
+        :arg parent_task_id: Cancel tasks with specified parent task id
+            (node_id:task_number). Set to -1 to cancel all.
+        :arg wait_for_completion: Should the request block until the
+            cancellation of the task and its descendant tasks is completed. Defaults
+            to false
+        """
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_tasks", task_id, "_cancel"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("timeout", "wait_for_completion")
+    def get(self, task_id=None, params=None, headers=None):
+        """
+        Returns information about a task.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/tasks.html>`_
+
+        :arg task_id: Return the task with specified id
+            (node_id:task_number)
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_completion: Wait for the matching tasks to
+            complete (default: false)
+        """
+        if task_id in SKIP_IN_PATH:
+            warnings.warn(
+                "Calling client.tasks.get() without a task_id is deprecated "
+                "and will be removed in v8.0. Use client.tasks.list() instead.",
+                category=DeprecationWarning,
+            )
+
+        return self.transport.perform_request(
+            "GET", _make_path("_tasks", task_id), params=params, headers=headers
+        )
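A usage sketch for the task management calls above; the task id below is a placeholder of the documented "node_id:task_number" form:

    from elasticsearch_7 import Elasticsearch

    client = Elasticsearch(["localhost:9200"])

    # List detailed task information, grouped by parent/child relationships.
    tasks = client.tasks.list(detailed=True, group_by="parents")

    # Fetch a single task; calling get() without a task_id is deprecated.
    info = client.tasks.get(task_id="oTUltX4IQMOUUVeiohTt8A:12345")

    # Cancel every cancellable child of a given parent task.
    client.tasks.cancel(parent_task_id="oTUltX4IQMOUUVeiohTt8A:12345")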
diff --git a/elasticsearch_7/client/transform.py b/elasticsearch_7/client/transform.py
new file mode 100644
index 0000000000000000000000000000000000000000..0098269a749abb2b0fccd27e93bf946d4546f342
--- /dev/null
+++ b/elasticsearch_7/client/transform.py
@@ -0,0 +1,208 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class TransformClient(NamespacedClient):
+    @query_params("force")
+    def delete_transform(self, transform_id, params=None, headers=None):
+        """
+        Deletes an existing transform.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/delete-transform.html>`_
+
+        :arg transform_id: The id of the transform to delete
+        :arg force: When `true`, the transform is deleted regardless of
+            its current state. The default value is `false`, meaning that the
+            transform must be `stopped` before it can be deleted.
+        """
+        if transform_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for a required argument 'transform_id'."
+            )
+
+        return self.transport.perform_request(
+            "DELETE",
+            _make_path("_transform", transform_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("allow_no_match", "from_", "size")
+    def get_transform(self, transform_id=None, params=None, headers=None):
+        """
+        Retrieves configuration information for transforms.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/get-transform.html>`_
+
+        :arg transform_id: The id or comma delimited list of id
+            expressions of the transforms to get, '_all' or '*' implies get all
+            transforms
+        :arg allow_no_match: Whether to ignore if a wildcard expression
+            matches no transforms. (This includes `_all` string or when no
+            transforms have been specified)
+        :arg from_: skips a number of transform configs, defaults to 0
+        :arg size: specifies a max number of transforms to get, defaults
+            to 100
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_transform", transform_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("allow_no_match", "from_", "size")
+    def get_transform_stats(self, transform_id, params=None, headers=None):
+        """
+        Retrieves usage information for transforms.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/get-transform-stats.html>`_
+
+        :arg transform_id: The id of the transform for which to get
+            stats. '_all' or '*' implies all transforms
+        :arg allow_no_match: Whether to ignore if a wildcard expression
+            matches no transforms. (This includes `_all` string or when no
+            transforms have been specified)
+        :arg from_: skips a number of transform stats, defaults to 0
+        :arg size: specifies a max number of transform stats to get,
+            defaults to 100
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        if transform_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for a required argument 'transform_id'."
+            )
+
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_transform", transform_id, "_stats"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def preview_transform(self, body, params=None, headers=None):
+        """
+        Previews a transform.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/preview-transform.html>`_
+
+        :arg body: The definition for the transform to preview
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return self.transport.perform_request(
+            "POST", "/_transform/_preview", params=params, headers=headers, body=body
+        )
+
+    @query_params("defer_validation")
+    def put_transform(self, transform_id, body, params=None, headers=None):
+        """
+        Instantiates a transform.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/put-transform.html>`_
+
+        :arg transform_id: The id of the new transform.
+        :arg body: The transform definition
+        :arg defer_validation: If validations should be deferred until
+            transform starts, defaults to false.
+        """
+        for param in (transform_id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_transform", transform_id),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("timeout")
+    def start_transform(self, transform_id, params=None, headers=None):
+        """
+        Starts one or more transforms.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/start-transform.html>`_
+
+        :arg transform_id: The id of the transform to start
+        :arg timeout: Controls the time to wait for the transform to
+            start
+        """
+        if transform_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for a required argument 'transform_id'."
+            )
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_transform", transform_id, "_start"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "allow_no_match",
+        "force",
+        "timeout",
+        "wait_for_checkpoint",
+        "wait_for_completion",
+    )
+    def stop_transform(self, transform_id, params=None, headers=None):
+        """
+        Stops one or more transforms.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/stop-transform.html>`_
+
+        :arg transform_id: The id of the transform to stop
+        :arg allow_no_match: Whether to ignore if a wildcard expression
+            matches no transforms. (This includes `_all` string or when no
+            transforms have been specified)
+        :arg force: Whether to force stop a failed transform or not.
+            Default to false
+        :arg timeout: Controls the time to wait until the transform has
+            stopped. Default to 30 seconds
+        :arg wait_for_checkpoint: Whether to wait for the transform to
+            reach a checkpoint before stopping. Default to false
+        :arg wait_for_completion: Whether to wait for the transform to
+            fully stop before returning or not. Default to false
+        """
+        if transform_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for a required argument 'transform_id'."
+            )
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_transform", transform_id, "_stop"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("defer_validation")
+    def update_transform(self, transform_id, body, params=None, headers=None):
+        """
+        Updates certain properties of a transform.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/update-transform.html>`_
+
+        :arg transform_id: The id of the transform.
+        :arg body: The update transform definition
+        :arg defer_validation: If validations should be deferred until
+            transform starts, defaults to false.
+        """
+        for param in (transform_id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_transform", transform_id, "_update"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
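The methods above map onto a preview/create/start/stop/delete lifecycle. A sketch assuming a source index "kibana_sample_data_ecommerce"; the transform id, destination index, and pivot definition are illustrative:

    from elasticsearch_7 import Elasticsearch

    client = Elasticsearch(["localhost:9200"])

    transform = {
        "source": {"index": "kibana_sample_data_ecommerce"},
        "dest": {"index": "ecommerce_by_customer"},
        "pivot": {
            "group_by": {"customer_id": {"terms": {"field": "customer_id"}}},
            "aggregations": {"total_spend": {"sum": {"field": "taxful_total_price"}}},
        },
    }

    # Preview the destination documents without creating anything.
    client.transform.preview_transform(body=transform)

    # Create, start, inspect, then stop and delete the transform.
    client.transform.put_transform(transform_id="ecommerce-by-customer", body=transform)
    client.transform.start_transform(transform_id="ecommerce-by-customer")
    stats = client.transform.get_transform_stats(transform_id="ecommerce-by-customer")
    client.transform.stop_transform(
        transform_id="ecommerce-by-customer", wait_for_completion=True
    )
    client.transform.delete_transform(transform_id="ecommerce-by-customer")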
diff --git a/elasticsearch_7/client/utils.py b/elasticsearch_7/client/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..e8241fc2387f08d6fa96b7ee53bde2ffbaa30d5b
--- /dev/null
+++ b/elasticsearch_7/client/utils.py
@@ -0,0 +1,175 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from __future__ import unicode_literals
+
+import weakref
+from datetime import date, datetime
+from functools import wraps
+from ..compat import string_types, quote, PY2, unquote, urlparse
+
+# parts of URL to be omitted
+SKIP_IN_PATH = (None, "", b"", [], ())
+
+
+def _normalize_hosts(hosts):
+    """
+    Helper function to transform the hosts argument passed to
+    :class:`~elasticsearch.Elasticsearch` into a list of dicts.
+    """
+    # if hosts are empty, just defer to defaults down the line
+    if hosts is None:
+        return [{}]
+
+    # passed in just one string
+    if isinstance(hosts, string_types):
+        hosts = [hosts]
+
+    out = []
+    # normalize hosts to dicts
+    for host in hosts:
+        if isinstance(host, string_types):
+            if "://" not in host:
+                host = "//%s" % host
+
+            parsed_url = urlparse(host)
+            h = {"host": parsed_url.hostname}
+
+            if parsed_url.port:
+                h["port"] = parsed_url.port
+
+            if parsed_url.scheme == "https":
+                h["port"] = parsed_url.port or 443
+                h["use_ssl"] = True
+
+            if parsed_url.username or parsed_url.password:
+                h["http_auth"] = "%s:%s" % (
+                    unquote(parsed_url.username),
+                    unquote(parsed_url.password),
+                )
+
+            if parsed_url.path and parsed_url.path != "/":
+                h["url_prefix"] = parsed_url.path
+
+            out.append(h)
+        else:
+            out.append(host)
+    return out
+
+
+def _escape(value):
+    """
+    Escape a single value of a URL string or a query parameter. If it is a list
+    or tuple, turn it into a comma-separated string first.
+    """
+
+    # make sequences into comma-separated strings
+    if isinstance(value, (list, tuple)):
+        value = ",".join(value)
+
+    # dates and datetimes into isoformat
+    elif isinstance(value, (date, datetime)):
+        value = value.isoformat()
+
+    # make bools into true/false strings
+    elif isinstance(value, bool):
+        value = str(value).lower()
+
+    # don't decode bytestrings
+    elif isinstance(value, bytes):
+        return value
+
+    # encode strings to utf-8
+    if isinstance(value, string_types):
+        if PY2 and isinstance(value, unicode):  # noqa: F821
+            return value.encode("utf-8")
+        if not PY2 and isinstance(value, str):
+            return value.encode("utf-8")
+
+    return str(value)
+
+
+def _make_path(*parts):
+    """
+    Create a URL string from parts, omit all `None` values and empty strings.
+    Convert lists and tuples to comma separated values.
+    """
+    # TODO: maybe only allow some parts to be lists/tuples ?
+    return "/" + "/".join(
+        # preserve ',' and '*' in url for nicer URLs in logs
+        quote(_escape(p), b",*")
+        for p in parts
+        if p not in SKIP_IN_PATH
+    )
+
+
+# parameters that apply to all methods
+GLOBAL_PARAMS = ("pretty", "human", "error_trace", "format", "filter_path")
+
+
+def query_params(*es_query_params):
+    """
+    Decorator that pops all accepted parameters from the method's kwargs and
+    puts them in the params argument.
+    """
+
+    def _wrapper(func):
+        @wraps(func)
+        def _wrapped(*args, **kwargs):
+            params = (kwargs.pop("params", None) or {}).copy()
+            headers = {
+                k.lower(): v
+                for k, v in (kwargs.pop("headers", None) or {}).copy().items()
+            }
+
+            if "opaque_id" in kwargs:
+                headers["x-opaque-id"] = kwargs.pop("opaque_id")
+
+            for p in es_query_params + GLOBAL_PARAMS:
+                if p in kwargs:
+                    v = kwargs.pop(p)
+                    if v is not None:
+                        params[p] = _escape(v)
+
+            # don't treat ignore, request_timeout, and opaque_id as other params to avoid escaping
+            for p in ("ignore", "request_timeout"):
+                if p in kwargs:
+                    params[p] = kwargs.pop(p)
+            return func(*args, params=params, headers=headers, **kwargs)
+
+        return _wrapped
+
+    return _wrapper
+
+
+def _bulk_body(serializer, body):
+    # if not passed in a string, serialize items and join by newline
+    if not isinstance(body, string_types):
+        body = "\n".join(map(serializer.dumps, body))
+
+    # bulk body must end with a newline
+    if isinstance(body, bytes):
+        if not body.endswith(b"\n"):
+            body += b"\n"
+    elif isinstance(body, string_types) and not body.endswith("\n"):
+        body += "\n"
+
+    return body
+
+
+class NamespacedClient(object):
+    def __init__(self, client):
+        self.client = client
+
+    @property
+    def transport(self):
+        return self.client.transport
+
+
+class AddonClient(NamespacedClient):
+    @classmethod
+    def infect_client(cls, client):
+        addon = cls(weakref.proxy(client))
+        setattr(client, cls.namespace, addon)
+        return client
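To make the helpers above concrete, here is a small sketch exercising _escape, _make_path, and the query_params decorator directly; the values and the fake_api function are illustrative, and the expected results follow the code above:

    from datetime import date

    from elasticsearch_7.client.utils import _escape, _make_path, query_params

    # Lists become comma-separated, bools become "true"/"false",
    # dates become ISO-8601, and text is UTF-8 encoded.
    assert _escape(["a", "b"]) == b"a,b"
    assert _escape(True) == b"true"
    assert _escape(date(2020, 6, 1)) == b"2020-06-01"

    # None values are skipped; ',' and '*' survive unescaped in paths.
    assert (
        _make_path("_snapshot", None, "repo", ["snap-1", "snap-2"])
        == "/_snapshot/repo/snap-1,snap-2"
    )

    # The decorator moves whitelisted keyword arguments into `params`
    # and lifts opaque_id into the headers dict.
    @query_params("timeout", "wait_for_completion")
    def fake_api(params=None, headers=None):
        return params, headers

    params, headers = fake_api(timeout="30s", opaque_id="my-request")
    assert params == {"timeout": b"30s"}
    assert headers == {"x-opaque-id": "my-request"}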
diff --git a/elasticsearch_7/client/watcher.py b/elasticsearch_7/client/watcher.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3f38a5c1d31d17720e721196befc56d8b8fb6c2
--- /dev/null
+++ b/elasticsearch_7/client/watcher.py
@@ -0,0 +1,180 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class WatcherClient(NamespacedClient):
+    @query_params()
+    def ack_watch(self, watch_id, action_id=None, params=None, headers=None):
+        """
+        Acknowledges a watch, manually throttling the execution of the watch's actions.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/watcher-api-ack-watch.html>`_
+
+        :arg watch_id: Watch ID
+        :arg action_id: A comma-separated list of the action ids to be
+            acked
+        """
+        if watch_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'watch_id'.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_watcher", "watch", watch_id, "_ack", action_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def activate_watch(self, watch_id, params=None, headers=None):
+        """
+        Activates a currently inactive watch.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/watcher-api-activate-watch.html>`_
+
+        :arg watch_id: Watch ID
+        """
+        if watch_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'watch_id'.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_watcher", "watch", watch_id, "_activate"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def deactivate_watch(self, watch_id, params=None, headers=None):
+        """
+        Deactivates a currently active watch.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/watcher-api-deactivate-watch.html>`_
+
+        :arg watch_id: Watch ID
+        """
+        if watch_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'watch_id'.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_watcher", "watch", watch_id, "_deactivate"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def delete_watch(self, id, params=None, headers=None):
+        """
+        Removes a watch from Watcher.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/watcher-api-delete-watch.html>`_
+
+        :arg id: Watch ID
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'id'.")
+
+        return self.transport.perform_request(
+            "DELETE",
+            _make_path("_watcher", "watch", id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("debug")
+    def execute_watch(self, body=None, id=None, params=None, headers=None):
+        """
+        Forces the execution of a stored watch.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/watcher-api-execute-watch.html>`_
+
+        :arg body: Execution control
+        :arg id: Watch ID
+        :arg debug: indicates whether the watch should execute in debug
+            mode
+        """
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_watcher", "watch", id, "_execute"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def get_watch(self, id, params=None, headers=None):
+        """
+        Retrieves a watch by its ID.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/watcher-api-get-watch.html>`_
+
+        :arg id: Watch ID
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'id'.")
+
+        return self.transport.perform_request(
+            "GET", _make_path("_watcher", "watch", id), params=params, headers=headers
+        )
+
+    @query_params("active", "if_primary_term", "if_seq_no", "version")
+    def put_watch(self, id, body=None, params=None, headers=None):
+        """
+        Creates a new watch, or updates an existing one.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/watcher-api-put-watch.html>`_
+
+        :arg id: Watch ID
+        :arg body: The watch
+        :arg active: Specify whether the watch is in/active by default
+        :arg if_primary_term: only update the watch if the last
+            operation that has changed the watch has the specified primary term
+        :arg if_seq_no: only update the watch if the last operation that
+            has changed the watch has the specified sequence number
+        :arg version: Explicit version number for concurrency control
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'id'.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_watcher", "watch", id),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def start(self, params=None, headers=None):
+        """
+        Starts Watcher if it is not already running.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/watcher-api-start.html>`_
+        """
+        return self.transport.perform_request(
+            "POST", "/_watcher/_start", params=params, headers=headers
+        )
+
+    @query_params("emit_stacktraces")
+    def stats(self, metric=None, params=None, headers=None):
+        """
+        Retrieves the current Watcher metrics.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/watcher-api-stats.html>`_
+
+        :arg metric: Controls what additional stat metrics should be
+            included in the response  Valid choices: _all, queued_watches,
+            current_watches, pending_watches
+        :arg emit_stacktraces: Emits stack traces of currently running
+            watches
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_watcher", "stats", metric),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def stop(self, params=None, headers=None):
+        """
+        Stops Watcher if it is running.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/watcher-api-stop.html>`_
+        """
+        return self.transport.perform_request(
+            "POST", "/_watcher/_stop", params=params, headers=headers
+        )
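A lifecycle sketch for the watcher calls above; the watch id, action name, and watch body (a simple input plus a logging action) are illustrative only:

    from elasticsearch_7 import Elasticsearch

    client = Elasticsearch(["localhost:9200"])

    watch = {
        "trigger": {"schedule": {"interval": "10m"}},
        "input": {"simple": {"payload": "ok"}},
        "actions": {"log_it": {"logging": {"text": "watch fired"}}},
    }

    # Create (or overwrite) the watch, then run it once in debug mode.
    client.watcher.put_watch(id="error_alert", body=watch, active=True)
    client.watcher.execute_watch(id="error_alert", debug=True)

    # Acknowledge the action to throttle it, then deactivate and delete.
    client.watcher.ack_watch(watch_id="error_alert", action_id="log_it")
    client.watcher.deactivate_watch(watch_id="error_alert")
    client.watcher.delete_watch(id="error_alert")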
diff --git a/elasticsearch_7/client/xpack.py b/elasticsearch_7/client/xpack.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a3906a8b7fda1e20bb38c2007d41eac36f36c10
--- /dev/null
+++ b/elasticsearch_7/client/xpack.py
@@ -0,0 +1,36 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params
+
+
+class XPackClient(NamespacedClient):
+    def __getattr__(self, attr_name):
+        return getattr(self.client, attr_name)
+
+    # AUTO-GENERATED-API-DEFINITIONS #
+    @query_params("categories")
+    def info(self, params=None, headers=None):
+        """
+        Retrieves information about the installed X-Pack features.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/info-api.html>`_
+
+        :arg categories: Comma-separated list of info categories. Can be
+            any of: build, license, features
+        """
+        return self.transport.perform_request(
+            "GET", "/_xpack", params=params, headers=headers
+        )
+
+    @query_params("master_timeout")
+    def usage(self, params=None, headers=None):
+        """
+        Retrieves usage information about the installed X-Pack features.
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/usage-api.html>`_
+
+        :arg master_timeout: Specify timeout for watch write operation
+        """
+        return self.transport.perform_request(
+            "GET", "/_xpack/usage", params=params, headers=headers
+        )
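A brief sketch of the namespace above; __getattr__ proxies unknown attributes back to the parent client, so top-level client methods (ping() is assumed here) remain reachable through it:

    from elasticsearch_7 import Elasticsearch

    client = Elasticsearch(["localhost:9200"])

    # Which X-Pack features does this cluster expose, and which are in use?
    info = client.xpack.info(categories="build,features")
    usage = client.xpack.usage()

    # Falls through __getattr__ to the parent client's ping().
    client.xpack.ping()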
diff --git a/elasticsearch_7/compat.py b/elasticsearch_7/compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..31040381ebbdb7860e87b099ac5bdac1a46600e8
--- /dev/null
+++ b/elasticsearch_7/compat.py
@@ -0,0 +1,31 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+import sys
+
+PY2 = sys.version_info[0] == 2
+
+if PY2:
+    string_types = (basestring,)  # noqa: F821
+    from urllib import quote_plus, quote, urlencode, unquote
+    from urlparse import urlparse
+    from itertools import imap as map
+    from Queue import Queue
+else:
+    string_types = str, bytes
+    from urllib.parse import quote, quote_plus, urlencode, urlparse, unquote
+
+    map = map
+    from queue import Queue
+
+__all__ = [
+    "string_types",
+    "quote_plus",
+    "quote",
+    "urlencode",
+    "unquote",
+    "urlparse",
+    "map",
+    "Queue",
+]
diff --git a/elasticsearch_7/connection/__init__.py b/elasticsearch_7/connection/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..7785e3296353266964d39e1b1ddf819ab0fe5aa7
--- /dev/null
+++ b/elasticsearch_7/connection/__init__.py
@@ -0,0 +1,14 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .base import Connection
+from .http_requests import RequestsHttpConnection
+from .http_urllib3 import Urllib3HttpConnection, create_ssl_context
+
+__all__ = [
+    "Connection",
+    "RequestsHttpConnection",
+    "Urllib3HttpConnection",
+    "create_ssl_context",
+]
diff --git a/elasticsearch_7/connection/__pycache__/__init__.cpython-38.pyc b/elasticsearch_7/connection/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4ad5dfa534b026977030fe0f158ce101b9abbe2d
Binary files /dev/null and b/elasticsearch_7/connection/__pycache__/__init__.cpython-38.pyc differ
diff --git a/elasticsearch_7/connection/__pycache__/base.cpython-38.pyc b/elasticsearch_7/connection/__pycache__/base.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9b6ae22dca40a70d5b26ba0268abae3ef0272af9
Binary files /dev/null and b/elasticsearch_7/connection/__pycache__/base.cpython-38.pyc differ
diff --git a/elasticsearch_7/connection/__pycache__/http_requests.cpython-38.pyc b/elasticsearch_7/connection/__pycache__/http_requests.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6c6494f601534ec07c363d386cae86762be9bf19
Binary files /dev/null and b/elasticsearch_7/connection/__pycache__/http_requests.cpython-38.pyc differ
diff --git a/elasticsearch_7/connection/__pycache__/http_urllib3.cpython-38.pyc b/elasticsearch_7/connection/__pycache__/http_urllib3.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f45843bbcf3a7dd91d8d946e1c8b8d1c778425c6
Binary files /dev/null and b/elasticsearch_7/connection/__pycache__/http_urllib3.cpython-38.pyc differ
diff --git a/elasticsearch_7/connection/__pycache__/pooling.cpython-38.pyc b/elasticsearch_7/connection/__pycache__/pooling.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5b66ccf9f46e99f36b02168688ffd3c94765e534
Binary files /dev/null and b/elasticsearch_7/connection/__pycache__/pooling.cpython-38.pyc differ
diff --git a/elasticsearch_7/connection/base.py b/elasticsearch_7/connection/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..c936830993814a4ad02dfcde6dd01c34e2b76c43
--- /dev/null
+++ b/elasticsearch_7/connection/base.py
@@ -0,0 +1,303 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+import logging
+import binascii
+import gzip
+import io
+import re
+from platform import python_version
+import warnings
+
+try:
+    import simplejson as json
+except ImportError:
+    import json
+
+from ..exceptions import (
+    TransportError,
+    ImproperlyConfigured,
+    ElasticsearchDeprecationWarning,
+    HTTP_EXCEPTIONS,
+)
+from .. import __versionstr__
+
+logger = logging.getLogger("elasticsearch")
+
+# create the elasticsearch.trace logger, but only set propagate to False if the
+# logger hasn't already been configured
+_tracer_already_configured = "elasticsearch.trace" in logging.Logger.manager.loggerDict
+tracer = logging.getLogger("elasticsearch.trace")
+if not _tracer_already_configured:
+    tracer.propagate = False
+
+_WARNING_RE = re.compile(r"\"([^\"]*)\"")
+
+
+class Connection(object):
+    """
+    Class responsible for maintaining a connection to an Elasticsearch node. It
+    holds a persistent connection pool to the node and its main interface
+    (`perform_request`) is thread-safe.
+
+    Also responsible for logging.
+
+    :arg host: hostname of the node (default: localhost)
+    :arg port: port to use (integer, default: 9200)
+    :arg use_ssl: use ssl for the connection if `True`
+    :arg url_prefix: optional url prefix for elasticsearch
+    :arg timeout: default timeout in seconds (float, default: 10)
+    :arg http_compress: Use gzip compression
+    :arg cloud_id: The Cloud ID from ElasticCloud. Convenient way to connect to cloud instances.
+    :arg opaque_id: Send this value in the 'X-Opaque-Id' HTTP header
+        For tracing all requests made by this transport.
+    """
+
+    def __init__(
+        self,
+        host="localhost",
+        port=None,
+        use_ssl=False,
+        url_prefix="",
+        timeout=10,
+        headers=None,
+        http_compress=None,
+        cloud_id=None,
+        api_key=None,
+        opaque_id=None,
+        **kwargs
+    ):
+
+        if cloud_id:
+            try:
+                _, cloud_id = cloud_id.split(":")
+                parent_dn, es_uuid = (
+                    binascii.a2b_base64(cloud_id.encode("utf-8"))
+                    .decode("utf-8")
+                    .split("$")[:2]
+                )
+                if ":" in parent_dn:
+                    parent_dn, _, parent_port = parent_dn.rpartition(":")
+                    if port is None and parent_port != "443":
+                        port = int(parent_port)
+            except (ValueError, IndexError):
+                raise ImproperlyConfigured("'cloud_id' is not properly formatted")
+
+            host = "%s.%s" % (es_uuid, parent_dn)
+            use_ssl = True
+            if http_compress is None:
+                http_compress = True
+
+        # If cloud_id isn't set and port is default then use 9200.
+        # Cloud should use '443' by default via the 'https' scheme.
+        elif port is None:
+            port = 9200
+
+        # Work-around if the implementing class doesn't
+        # define the headers property before calling super().__init__()
+        if not hasattr(self, "headers"):
+            self.headers = {}
+
+        headers = headers or {}
+        for key in headers:
+            self.headers[key.lower()] = headers[key]
+        if opaque_id:
+            self.headers["x-opaque-id"] = opaque_id
+
+        self.headers.setdefault("content-type", "application/json")
+        self.headers.setdefault("user-agent", self._get_default_user_agent())
+
+        if api_key is not None:
+            self.headers["authorization"] = self._get_api_key_header_val(api_key)
+
+        if http_compress:
+            self.headers["accept-encoding"] = "gzip,deflate"
+
+        scheme = kwargs.get("scheme", "http")
+        if use_ssl or scheme == "https":
+            scheme = "https"
+            use_ssl = True
+        self.use_ssl = use_ssl
+        self.http_compress = http_compress or False
+
+        self.scheme = scheme
+        self.hostname = host
+        self.port = port
+        self.host = "%s://%s" % (scheme, host)
+        if self.port is not None:
+            self.host += ":%s" % self.port
+        if url_prefix:
+            url_prefix = "/" + url_prefix.strip("/")
+        self.url_prefix = url_prefix
+        self.timeout = timeout
+
+    def __repr__(self):
+        return "<%s: %s>" % (self.__class__.__name__, self.host)
+
+    def __eq__(self, other):
+        if not isinstance(other, Connection):
+            raise TypeError("Unsupported equality check for %s and %s" % (self, other))
+        return self.__hash__() == other.__hash__()
+
+    def __hash__(self):
+        return id(self)
+
+    def _gzip_compress(self, body):
+        buf = io.BytesIO()
+        with gzip.GzipFile(fileobj=buf, mode="wb") as f:
+            f.write(body)
+        return buf.getvalue()
+
+    def _raise_warnings(self, warning_headers):
+        """If 'headers' contains a 'Warning' header raise
+        the warnings to be seen by the user. Takes an iterable
+        of string values from any number of 'Warning' headers.
+        """
+        if not warning_headers:
+            return
+
+        # Grab only the message from each header, the rest is discarded.
+        # Format is: '(number) Elasticsearch-(version)-(instance) "(message)"'
+        warning_messages = []
+        for header in warning_headers:
+            # Because 'Requests' does its own folding of multiple HTTP headers
+            # into one header delimited by commas (totally standards-compliant, just
+            # annoying for cases like this) we need to expect there may be
+            # more than one message per 'Warning' header.
+            matches = _WARNING_RE.findall(header)
+            if matches:
+                warning_messages.extend(matches)
+            else:
+                # Don't want to throw away any warnings, even if they
+                # don't follow the format we have now. Use the whole header.
+                warning_messages.append(header)
+
+        for message in warning_messages:
+            warnings.warn(message, category=ElasticsearchDeprecationWarning)
+
+    def _pretty_json(self, data):
+        # pretty JSON in tracer curl logs
+        try:
+            return json.dumps(
+                json.loads(data), sort_keys=True, indent=2, separators=(",", ": ")
+            ).replace("'", r"\u0027")
+        except (ValueError, TypeError):
+            # non-json data or a bulk request
+            return data
+
+    def _log_trace(self, method, path, body, status_code, response, duration):
+        if not tracer.isEnabledFor(logging.INFO) or not tracer.handlers:
+            return
+
+        # include pretty in trace curls
+        path = path.replace("?", "?pretty&", 1) if "?" in path else path + "?pretty"
+        if self.url_prefix:
+            path = path.replace(self.url_prefix, "", 1)
+        tracer.info(
+            "curl %s-X%s 'http://localhost:9200%s' -d '%s'",
+            "-H 'Content-Type: application/json' " if body else "",
+            method,
+            path,
+            self._pretty_json(body) if body else "",
+        )
+
+        if tracer.isEnabledFor(logging.DEBUG):
+            tracer.debug(
+                "#[%s] (%.3fs)\n#%s",
+                status_code,
+                duration,
+                self._pretty_json(response).replace("\n", "\n#") if response else "",
+            )
+
+    def log_request_success(
+        self, method, full_url, path, body, status_code, response, duration
+    ):
+        """ Log a successful API call.  """
+        #  TODO: optionally pass in params instead of full_url and do urlencode only when needed
+
+        # body has already been serialized to utf-8, deserialize it for logging
+        # TODO: find a better way to avoid (de)encoding the body back and forth
+        if body:
+            try:
+                body = body.decode("utf-8", "ignore")
+            except AttributeError:
+                pass
+
+        logger.info(
+            "%s %s [status:%s request:%.3fs]", method, full_url, status_code, duration
+        )
+        logger.debug("> %s", body)
+        logger.debug("< %s", response)
+
+        self._log_trace(method, path, body, status_code, response, duration)
+
+    def log_request_fail(
+        self,
+        method,
+        full_url,
+        path,
+        body,
+        duration,
+        status_code=None,
+        response=None,
+        exception=None,
+    ):
+        """ Log an unsuccessful API call.  """
+        # do not log 404s on HEAD requests
+        if method == "HEAD" and status_code == 404:
+            return
+        logger.warning(
+            "%s %s [status:%s request:%.3fs]",
+            method,
+            full_url,
+            status_code or "N/A",
+            duration,
+            exc_info=exception is not None,
+        )
+
+        # body has already been serialized to utf-8, deserialize it for logging
+        # TODO: find a better way to avoid (de)encoding the body back and forth
+        if body:
+            try:
+                body = body.decode("utf-8", "ignore")
+            except AttributeError:
+                pass
+
+        logger.debug("> %s", body)
+
+        self._log_trace(method, path, body, status_code, response, duration)
+
+        if response is not None:
+            logger.debug("< %s", response)
+
+    def _raise_error(self, status_code, raw_data):
+        """ Locate appropriate exception and raise it. """
+        error_message = raw_data
+        additional_info = None
+        try:
+            if raw_data:
+                additional_info = json.loads(raw_data)
+                error_message = additional_info.get("error", error_message)
+                if isinstance(error_message, dict) and "type" in error_message:
+                    error_message = error_message["type"]
+        except (ValueError, TypeError) as err:
+            logger.warning("Undecodable raw error response from server: %s", err)
+
+        raise HTTP_EXCEPTIONS.get(status_code, TransportError)(
+            status_code, error_message, additional_info
+        )
+
+    def _get_default_user_agent(self):
+        return "elasticsearch-py/%s (Python %s)" % (__versionstr__, python_version())
+
+    def _get_api_key_header_val(self, api_key):
+        """
+        Check the type of the passed api_key and return the correct header value
+        for `API Key authentication <https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-api-key.html>`_
+        :arg api_key: either a tuple or a base64 encoded string
+        """
+        if isinstance(api_key, (tuple, list)):
+            s = "{0}:{1}".format(api_key[0], api_key[1]).encode("utf-8")
+            return "ApiKey " + binascii.b2a_base64(s).rstrip(b"\r\n").decode("utf-8")
+        return "ApiKey " + api_key
diff --git a/elasticsearch_7/connection/http_requests.py b/elasticsearch_7/connection/http_requests.py
new file mode 100644
index 0000000000000000000000000000000000000000..1405c7c9a3ea8ae05fde7a7e8d4511341d4ace8d
--- /dev/null
+++ b/elasticsearch_7/connection/http_requests.py
@@ -0,0 +1,205 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+import time
+import warnings
+
+try:
+    import requests
+
+    REQUESTS_AVAILABLE = True
+except ImportError:
+    REQUESTS_AVAILABLE = False
+
+from .base import Connection
+from ..exceptions import (
+    ConnectionError,
+    ImproperlyConfigured,
+    ConnectionTimeout,
+    SSLError,
+)
+from ..compat import urlencode, string_types
+
+
+class RequestsHttpConnection(Connection):
+    """
+    Connection using the `requests` library.
+
+    :arg http_auth: optional http auth information as either ':' separated
+        string or a tuple. Any value will be passed into requests as `auth`.
+    :arg use_ssl: use ssl for the connection if `True`
+    :arg verify_certs: whether to verify SSL certificates
+    :arg ssl_show_warn: show warning when verify certs is disabled
+    :arg ca_certs: optional path to CA bundle. By default standard requests'
+        bundle will be used.
+    :arg client_cert: path to the file containing the private key and the
+        certificate, or cert only if using client_key
+    :arg client_key: path to the file containing the private key if using
+        separate cert and key files (client_cert will contain only the cert)
+    :arg headers: any custom http headers to be added to requests
+    :arg http_compress: Use gzip compression
+    :arg cloud_id: The Cloud ID from ElasticCloud. Convenient way to connect to cloud instances.
+        Other host connection params will be ignored.
+    :arg api_key: optional API Key authentication as either base64 encoded string or a tuple.
+    :arg opaque_id: Send this value in the 'X-Opaque-Id' HTTP header
+        For tracing all requests made by this transport.
+    """
+
+    def __init__(
+        self,
+        host="localhost",
+        port=None,
+        http_auth=None,
+        use_ssl=False,
+        verify_certs=True,
+        ssl_show_warn=True,
+        ca_certs=None,
+        client_cert=None,
+        client_key=None,
+        headers=None,
+        http_compress=None,
+        cloud_id=None,
+        api_key=None,
+        opaque_id=None,
+        **kwargs
+    ):
+        if not REQUESTS_AVAILABLE:
+            raise ImproperlyConfigured(
+                "Please install requests to use RequestsHttpConnection."
+            )
+
+        # Initialize Session so .headers works before calling super().__init__().
+        self.session = requests.Session()
+        for key in list(self.session.headers):
+            self.session.headers.pop(key)
+
+        super(RequestsHttpConnection, self).__init__(
+            host=host,
+            port=port,
+            use_ssl=use_ssl,
+            headers=headers,
+            http_compress=http_compress,
+            cloud_id=cloud_id,
+            api_key=api_key,
+            opaque_id=opaque_id,
+            **kwargs
+        )
+
+        if not self.http_compress:
+            # Need to set this to 'None' otherwise Requests adds its own.
+            self.session.headers["accept-encoding"] = None
+
+        if http_auth is not None:
+            if isinstance(http_auth, (tuple, list)):
+                http_auth = tuple(http_auth)
+            elif isinstance(http_auth, string_types):
+                http_auth = tuple(http_auth.split(":", 1))
+            self.session.auth = http_auth
+
+        self.base_url = "%s%s" % (self.host, self.url_prefix,)
+        self.session.verify = verify_certs
+        if not client_key:
+            self.session.cert = client_cert
+        elif client_cert:
+            # cert is a tuple of (certfile, keyfile)
+            self.session.cert = (client_cert, client_key)
+        if ca_certs:
+            if not verify_certs:
+                raise ImproperlyConfigured(
+                    "You cannot pass CA certificates when verify SSL is off."
+                )
+            self.session.verify = ca_certs
+
+        if not ssl_show_warn:
+            requests.packages.urllib3.disable_warnings()
+
+        if self.use_ssl and not verify_certs and ssl_show_warn:
+            warnings.warn(
+                "Connecting to %s using SSL with verify_certs=False is insecure."
+                % self.host
+            )
+
+    def perform_request(
+        self, method, url, params=None, body=None, timeout=None, ignore=(), headers=None
+    ):
+        url = self.base_url + url
+        headers = headers or {}
+        if params:
+            url = "%s?%s" % (url, urlencode(params or {}))
+
+        orig_body = body
+        if self.http_compress and body:
+            body = self._gzip_compress(body)
+            headers["content-encoding"] = "gzip"
+
+        start = time.time()
+        request = requests.Request(method=method, headers=headers, url=url, data=body)
+        prepared_request = self.session.prepare_request(request)
+        settings = self.session.merge_environment_settings(
+            prepared_request.url, {}, None, None, None
+        )
+        send_kwargs = {"timeout": timeout or self.timeout}
+        send_kwargs.update(settings)
+        try:
+            response = self.session.send(prepared_request, **send_kwargs)
+            duration = time.time() - start
+            raw_data = response.content.decode("utf-8", "surrogatepass")
+        except Exception as e:
+            self.log_request_fail(
+                method,
+                url,
+                prepared_request.path_url,
+                body,
+                time.time() - start,
+                exception=e,
+            )
+            if isinstance(e, requests.exceptions.SSLError):
+                raise SSLError("N/A", str(e), e)
+            if isinstance(e, requests.Timeout):
+                raise ConnectionTimeout("TIMEOUT", str(e), e)
+            raise ConnectionError("N/A", str(e), e)
+
+        # raise warnings if any from the 'Warnings' header.
+        warnings_headers = (
+            (response.headers["warning"],) if "warning" in response.headers else ()
+        )
+        self._raise_warnings(warnings_headers)
+
+        # raise errors based on http status codes, let the client handle those if needed
+        if (
+            not (200 <= response.status_code < 300)
+            and response.status_code not in ignore
+        ):
+            self.log_request_fail(
+                method,
+                url,
+                response.request.path_url,
+                orig_body,
+                duration,
+                response.status_code,
+                raw_data,
+            )
+            self._raise_error(response.status_code, raw_data)
+
+        self.log_request_success(
+            method,
+            url,
+            response.request.path_url,
+            orig_body,
+            response.status_code,
+            raw_data,
+            duration,
+        )
+
+        return response.status_code, response.headers, raw_data
+
+    @property
+    def headers(self):
+        return self.session.headers
+
+    def close(self):
+        """
+        Explicitly closes connections
+        """
+        self.session.close()
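A configuration sketch for the requests-based connection above, assuming the optional requests dependency is installed and using the standard connection_class keyword of the top-level client; host, credentials, and certificate path are placeholders:

    from elasticsearch_7 import Elasticsearch, RequestsHttpConnection

    client = Elasticsearch(
        ["https://es.internal.example.com:9243"],
        connection_class=RequestsHttpConnection,
        http_auth=("elastic", "changeme"),
        verify_certs=True,
        ca_certs="/etc/ssl/certs/my-ca.pem",
        http_compress=True,
        opaque_id="billing-report-job",
    )

    # A simple round trip through perform_request().
    print(client.ping())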
diff --git a/elasticsearch_7/connection/http_urllib3.py b/elasticsearch_7/connection/http_urllib3.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b95ecb1d5f44201527af849811e519703bf8733
--- /dev/null
+++ b/elasticsearch_7/connection/http_urllib3.py
@@ -0,0 +1,268 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+import time
+import ssl
+import urllib3
+from urllib3.exceptions import ReadTimeoutError, SSLError as UrllibSSLError
+from urllib3.util.retry import Retry
+import warnings
+
+from .base import Connection
+from ..exceptions import (
+    ConnectionError,
+    ImproperlyConfigured,
+    ConnectionTimeout,
+    SSLError,
+)
+from ..compat import urlencode
+
+# sentinel value for `verify_certs` and `ssl_show_warn`.
+# This is used to detect if a user is passing in a value
+# for SSL kwargs while also using an SSLContext.
+VERIFY_CERTS_DEFAULT = object()
+SSL_SHOW_WARN_DEFAULT = object()
+
+CA_CERTS = None
+
+try:
+    import certifi
+
+    CA_CERTS = certifi.where()
+except ImportError:
+    pass
+
+
+def create_ssl_context(**kwargs):
+    """
+    A helper function around creating an SSL context
+
+    https://docs.python.org/3/library/ssl.html#context-creation
+
+    Accepts kwargs in the same manner as `create_default_context`.
+    """
+    ctx = ssl.create_default_context(**kwargs)
+    return ctx
+
+
+class Urllib3HttpConnection(Connection):
+    """
+    Default connection class using the `urllib3` library and the http protocol.
+
+    :arg host: hostname of the node (default: localhost)
+    :arg port: port to use (integer, default: 9200)
+    :arg url_prefix: optional url prefix for elasticsearch
+    :arg timeout: default timeout in seconds (float, default: 10)
+    :arg http_auth: optional http auth information as either ':' separated
+        string or a tuple
+    :arg use_ssl: use ssl for the connection if `True`
+    :arg verify_certs: whether to verify SSL certificates
+    :arg ssl_show_warn: show warning when verify certs is disabled
+    :arg ca_certs: optional path to CA bundle.
+        See https://urllib3.readthedocs.io/en/latest/security.html#using-certifi-with-urllib3
+        for instructions on how to get the default set
+    :arg client_cert: path to the file containing the private key and the
+        certificate, or cert only if using client_key
+    :arg client_key: path to the file containing the private key if using
+        separate cert and key files (client_cert will contain only the cert)
+    :arg ssl_version: version of the SSL protocol to use. Choices are:
+        SSLv23 (default) SSLv2 SSLv3 TLSv1 (see ``PROTOCOL_*`` constants in the
+        ``ssl`` module for exact options for your environment).
+    :arg ssl_assert_hostname: use hostname verification if not `False`
+    :arg ssl_assert_fingerprint: verify the supplied certificate fingerprint if not `None`
+    :arg maxsize: the number of connections which will be kept open to this
+        host. See https://urllib3.readthedocs.io/en/1.4/pools.html#api for more
+        information.
+    :arg headers: any custom http headers to be added to requests
+    :arg http_compress: Use gzip compression
+    :arg cloud_id: The Cloud ID from ElasticCloud. Convenient way to connect to cloud instances.
+        Other host connection params will be ignored.
+    :arg api_key: optional API Key authentication as either base64 encoded string or a tuple.
+    :arg opaque_id: Send this value in the 'X-Opaque-Id' HTTP header
+        For tracing all requests made by this transport.
+    """
+
+    def __init__(
+        self,
+        host="localhost",
+        port=None,
+        http_auth=None,
+        use_ssl=False,
+        verify_certs=VERIFY_CERTS_DEFAULT,
+        ssl_show_warn=SSL_SHOW_WARN_DEFAULT,
+        ca_certs=None,
+        client_cert=None,
+        client_key=None,
+        ssl_version=None,
+        ssl_assert_hostname=None,
+        ssl_assert_fingerprint=None,
+        maxsize=10,
+        headers=None,
+        ssl_context=None,
+        http_compress=None,
+        cloud_id=None,
+        api_key=None,
+        opaque_id=None,
+        **kwargs
+    ):
+        # Initialize headers before calling super().__init__().
+        self.headers = urllib3.make_headers(keep_alive=True)
+
+        super(Urllib3HttpConnection, self).__init__(
+            host=host,
+            port=port,
+            use_ssl=use_ssl,
+            headers=headers,
+            http_compress=http_compress,
+            cloud_id=cloud_id,
+            api_key=api_key,
+            opaque_id=opaque_id,
+            **kwargs
+        )
+        if http_auth is not None:
+            if isinstance(http_auth, (tuple, list)):
+                http_auth = ":".join(http_auth)
+            self.headers.update(urllib3.make_headers(basic_auth=http_auth))
+
+        pool_class = urllib3.HTTPConnectionPool
+        kw = {}
+
+        # if providing an SSL context, warn if any other SSL related kwarg is also used
+        if ssl_context and (
+            (verify_certs is not VERIFY_CERTS_DEFAULT)
+            or (ssl_show_warn is not SSL_SHOW_WARN_DEFAULT)
+            or ca_certs
+            or client_cert
+            or client_key
+            or ssl_version
+        ):
+            warnings.warn(
+                "When using `ssl_context`, all other SSL related kwargs are ignored"
+            )
+
+        # if an ssl_context is provided and SSL is enabled, use the context directly
+        if ssl_context and self.use_ssl:
+            pool_class = urllib3.HTTPSConnectionPool
+            kw.update(
+                {
+                    "assert_fingerprint": ssl_assert_fingerprint,
+                    "ssl_context": ssl_context,
+                }
+            )
+
+        elif self.use_ssl:
+            pool_class = urllib3.HTTPSConnectionPool
+            kw.update(
+                {
+                    "ssl_version": ssl_version,
+                    "assert_hostname": ssl_assert_hostname,
+                    "assert_fingerprint": ssl_assert_fingerprint,
+                }
+            )
+
+            # Convert all sentinel values to their actual default
+            # values if not using an SSLContext.
+            if verify_certs is VERIFY_CERTS_DEFAULT:
+                verify_certs = True
+            if ssl_show_warn is SSL_SHOW_WARN_DEFAULT:
+                ssl_show_warn = True
+
+            ca_certs = CA_CERTS if ca_certs is None else ca_certs
+            if verify_certs:
+                if not ca_certs:
+                    raise ImproperlyConfigured(
+                        "Root certificates are missing for certificate "
+                        "validation. Either pass them in using the ca_certs parameter or "
+                        "install certifi to use it automatically."
+                    )
+
+                kw.update(
+                    {
+                        "cert_reqs": "CERT_REQUIRED",
+                        "ca_certs": ca_certs,
+                        "cert_file": client_cert,
+                        "key_file": client_key,
+                    }
+                )
+            else:
+                kw["cert_reqs"] = "CERT_NONE"
+                if ssl_show_warn:
+                    warnings.warn(
+                        "Connecting to %s using SSL with verify_certs=False is insecure."
+                        % self.host
+                    )
+                if not ssl_show_warn:
+                    urllib3.disable_warnings()
+
+        self.pool = pool_class(
+            self.hostname, port=self.port, timeout=self.timeout, maxsize=maxsize, **kw
+        )
+
+    def perform_request(
+        self, method, url, params=None, body=None, timeout=None, ignore=(), headers=None
+    ):
+        url = self.url_prefix + url
+        if params:
+            url = "%s?%s" % (url, urlencode(params))
+        full_url = self.host + url
+
+        start = time.time()
+        orig_body = body
+        try:
+            kw = {}
+            if timeout:
+                kw["timeout"] = timeout
+
+            # in python2 we need to make sure the url and method are not
+            # unicode. Otherwise the body will be decoded into unicode too and
+            # that will fail (#133, #201).
+            if not isinstance(url, str):
+                url = url.encode("utf-8")
+            if not isinstance(method, str):
+                method = method.encode("utf-8")
+
+            request_headers = self.headers.copy()
+            request_headers.update(headers or ())
+
+            if self.http_compress and body:
+                body = self._gzip_compress(body)
+                request_headers["content-encoding"] = "gzip"
+
+            response = self.pool.urlopen(
+                method, url, body, retries=Retry(False), headers=request_headers, **kw
+            )
+            duration = time.time() - start
+            raw_data = response.data.decode("utf-8", "surrogatepass")
+        except Exception as e:
+            self.log_request_fail(
+                method, full_url, url, orig_body, time.time() - start, exception=e
+            )
+            if isinstance(e, UrllibSSLError):
+                raise SSLError("N/A", str(e), e)
+            if isinstance(e, ReadTimeoutError):
+                raise ConnectionTimeout("TIMEOUT", str(e), e)
+            raise ConnectionError("N/A", str(e), e)
+
+        # raise warnings if any from the 'Warning' header.
+        warning_headers = response.headers.get_all("warning", ())
+        self._raise_warnings(warning_headers)
+
+        # raise errors based on http status codes, let the client handle those if needed
+        if not (200 <= response.status < 300) and response.status not in ignore:
+            self.log_request_fail(
+                method, full_url, url, orig_body, duration, response.status, raw_data
+            )
+            self._raise_error(response.status, raw_data)
+
+        self.log_request_success(
+            method, full_url, url, orig_body, response.status, raw_data, duration
+        )
+
+        return response.status, response.getheaders(), raw_data
+
+    def close(self):
+        """
+        Explicitly closes connection
+        """
+        self.pool.close()
diff --git a/elasticsearch_7/connection/pooling.py b/elasticsearch_7/connection/pooling.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f4a4f29fce86278e999a125ead2f490f7335a45
--- /dev/null
+++ b/elasticsearch_7/connection/pooling.py
@@ -0,0 +1,37 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+try:
+    import queue
+except ImportError:
+    import Queue as queue
+from .base import Connection
+
+
+class PoolingConnection(Connection):
+    """
+    Base connection class for connections that use libraries lacking thread
+    safety and built-in connection pooling. To use this, just implement a
+    ``_make_connection`` method that constructs a new connection and returns
+    it.
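+
+    A minimal sketch of a subclass, where ``SomeLibConnection`` is a
+    placeholder for whatever client library is being wrapped::
+
+        class MyConnection(PoolingConnection):
+            def _make_connection(self):
+                return SomeLibConnection(host=self.hostname, port=self.port)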
+    """
+
+    def __init__(self, *args, **kwargs):
+        self._free_connections = queue.Queue()
+        super(PoolingConnection, self).__init__(*args, **kwargs)
+
+    def _get_connection(self):
+        try:
+            return self._free_connections.get_nowait()
+        except queue.Empty:
+            return self._make_connection()
+
+    def _release_connection(self, con):
+        self._free_connections.put(con)
+
+    def close(self):
+        """
+        Explicitly close connection
+        """
+        pass
diff --git a/elasticsearch_7/connection_pool.py b/elasticsearch_7/connection_pool.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0951c3e5dd0628dae1056ed37588a52470157f4
--- /dev/null
+++ b/elasticsearch_7/connection_pool.py
@@ -0,0 +1,305 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+import time
+import random
+import logging
+import threading
+
+try:
+    from Queue import PriorityQueue, Empty
+except ImportError:
+    from queue import PriorityQueue, Empty
+
+from .exceptions import ImproperlyConfigured
+
+logger = logging.getLogger("elasticsearch")
+
+
+class ConnectionSelector(object):
+    """
+    Simple class used to select a connection from a list of currently live
+    connection instances. At init time it is passed a dictionary containing all
+    the connections' options, which it can then use during the selection
+    process. When the `select` method is called it is given a list of
+    *currently* live connections to choose from.
+
+    The options dictionary is the one that has been passed to
+    :class:`~elasticsearch.Transport` as `hosts` param and the same that is
+    used to construct the Connection object itself. When the Connection was
+    created from information retrieved from the cluster via the sniffing
+    process it will be the dictionary returned by the `host_info_callback`.
+
+    An example of where this would be useful is a zone-aware selector that
+    would only select connections from its own zone, falling back to other
+    connections only when there are none in its zone.
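+
+    A sketch of such a zone-aware selector (the ``zone`` option and the zone
+    name are illustrative)::
+
+        class ZoneSelector(ConnectionSelector):
+            def select(self, connections):
+                local = [
+                    c
+                    for c in connections
+                    if self.connection_opts[c].get("zone") == "us-east-1a"
+                ]
+                return random.choice(local or connections)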
+    """
+
+    def __init__(self, opts):
+        """
+        :arg opts: dictionary of connection instances and their options
+        """
+        self.connection_opts = opts
+
+    def select(self, connections):
+        """
+        Select a connection from the given list.
+
+        :arg connections: list of live connections to choose from
+        """
+        pass
+
+
+class RandomSelector(ConnectionSelector):
+    """
+    Select a connection at random
+    """
+
+    def select(self, connections):
+        return random.choice(connections)
+
+
+class RoundRobinSelector(ConnectionSelector):
+    """
+    Selector using round-robin.
+    """
+
+    def __init__(self, opts):
+        super(RoundRobinSelector, self).__init__(opts)
+        self.data = threading.local()
+
+    def select(self, connections):
+        self.data.rr = getattr(self.data, "rr", -1) + 1
+        self.data.rr %= len(connections)
+        return connections[self.data.rr]
+
+
+class ConnectionPool(object):
+    """
+    Container holding the :class:`~elasticsearch.Connection` instances,
+    managing the selection process (via a
+    :class:`~elasticsearch.ConnectionSelector`) and dead connections.
+
+    Its only interactions are with the :class:`~elasticsearch.Transport` class
+    that drives all the actions within `ConnectionPool`.
+
+    Initially connections are stored on the class as a list and, along with the
+    connection options, get passed to the `ConnectionSelector` instance for
+    future reference.
+
+    Upon each request the `Transport` will ask for a `Connection` via the
+    `get_connection` method. If the connection fails (it's `perform_request`
+    raises a `ConnectionError`) it will be marked as dead (via `mark_dead`) and
+    put on a timeout (if it fails N times in a row the timeout is exponentially
+    longer - the formula is `default_timeout * 2 ** (fail_count - 1)`). When
+    the timeout is over the connection will be resurrected and returned to the
+    live pool. A connection that has been previously marked as dead and
+    succeeds will be marked as live (its fail count will be deleted).
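+
+    For example, with the default ``dead_timeout`` of 60 seconds a first
+    failure puts a connection on a 60 second timeout, a second consecutive
+    failure on 120 seconds, a third on 240 seconds and so on, with the
+    exponent capped by ``timeout_cutoff``.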
+    """
+
+    def __init__(
+        self,
+        connections,
+        dead_timeout=60,
+        timeout_cutoff=5,
+        selector_class=RoundRobinSelector,
+        randomize_hosts=True,
+        **kwargs
+    ):
+        """
+        :arg connections: list of tuples containing the
+            :class:`~elasticsearch.Connection` instance and its options
+        :arg dead_timeout: number of seconds a connection should be retired for
+            after a failure, increases on consecutive failures
+        :arg timeout_cutoff: number of consecutive failures after which the
+            timeout doesn't increase
+        :arg selector_class: :class:`~elasticsearch.ConnectionSelector`
+            subclass to use if more than one connection is live
+        :arg randomize_hosts: shuffle the list of connections upon arrival to
+            avoid a dog-piling effect across processes
+        """
+        if not connections:
+            raise ImproperlyConfigured(
+                "No defined connections, you need to " "specify at least one host."
+            )
+        self.connection_opts = connections
+        self.connections = [c for (c, opts) in connections]
+        # remember original connection list for resurrect(force=True)
+        self.orig_connections = tuple(self.connections)
+        # PriorityQueue for thread safety and ease of timeout management
+        self.dead = PriorityQueue(len(self.connections))
+        self.dead_count = {}
+
+        if randomize_hosts:
+            # randomize the connection list to avoid all clients hitting same node
+            # after startup/restart
+            random.shuffle(self.connections)
+
+        # default timeout after which to try resurrecting a connection
+        self.dead_timeout = dead_timeout
+        self.timeout_cutoff = timeout_cutoff
+
+        self.selector = selector_class(dict(connections))
+
+    def mark_dead(self, connection, now=None):
+        """
+        Mark the connection as dead (failed). Remove it from the live pool and
+        put it on a timeout.
+
+        :arg connection: the failed instance
+        """
+        # allow inject for testing purposes
+        now = now if now else time.time()
+        try:
+            self.connections.remove(connection)
+        except ValueError:
+            logger.info(
+                "Attempted to remove %r, but it does not exist in the connection pool.",
+                connection,
+            )
+            # connection not alive or another thread marked it already, ignore
+            return
+        else:
+            dead_count = self.dead_count.get(connection, 0) + 1
+            self.dead_count[connection] = dead_count
+            timeout = self.dead_timeout * 2 ** min(dead_count - 1, self.timeout_cutoff)
+            self.dead.put((now + timeout, connection))
+            logger.warning(
+                "Connection %r has failed for %i times in a row, putting on %i second timeout.",
+                connection,
+                dead_count,
+                timeout,
+            )
+
+    def mark_live(self, connection):
+        """
+        Mark connection as healthy after a resurrection. Resets the fail
+        counter for the connection.
+
+        :arg connection: the connection to redeem
+        """
+        try:
+            del self.dead_count[connection]
+        except KeyError:
+            # race condition, safe to ignore
+            pass
+
+    def resurrect(self, force=False):
+        """
+        Attempt to resurrect a connection from the dead pool. It will try to
+        locate one (not all) eligible (its timeout is over) connection to
+        return to the live pool. Any resurrected connection is also returned.
+
+        :arg force: resurrect a connection even if there is none eligible (used
+            when we have no live connections). If force is specified resurrect
+            always returns a connection.
+
+        """
+        # no dead connections
+        if self.dead.empty():
+            # we are forced to return a connection, take one from the original
+            # list. This is to avoid a race condition where get_connection can
+            # see no live connections but when it calls resurrect self.dead is
+            # also empty. We assume that another thread has resurrected all
+            # available connections so we can safely return one at random.
+            if force:
+                return random.choice(self.orig_connections)
+            return
+
+        try:
+            # retrieve a connection to check
+            timeout, connection = self.dead.get(block=False)
+        except Empty:
+            # other thread has been faster and the queue is now empty. If we
+            # are forced, return a connection at random again.
+            if force:
+                return random.choice(self.orig_connections)
+            return
+
+        if not force and timeout > time.time():
+            # return it back if not eligible and not forced
+            self.dead.put((timeout, connection))
+            return
+
+        # either we were forced or the connection is eligible to be retried
+        self.connections.append(connection)
+        logger.info("Resurrecting connection %r (force=%s).", connection, force)
+        return connection
+
+    def get_connection(self):
+        """
+        Return a connection from the pool using the `ConnectionSelector`
+        instance.
+
+        It tries to resurrect eligible connections, forces a resurrection when
+        no connections are available and passes the list of live connections to
+        the selector instance to choose from.
+
+        Returns a connection instance.
+        """
+        self.resurrect()
+        connections = self.connections[:]
+
+        # no live nodes, resurrect one by force and return it
+        if not connections:
+            return self.resurrect(True)
+
+        # only call selector if we have a selection
+        if len(connections) > 1:
+            return self.selector.select(connections)
+
+        # only one connection, no need for a selector
+        return connections[0]
+
+    def close(self):
+        """
+        Explicitly closes connections
+        """
+        for conn in self.connections:
+            conn.close()
+
+    def __repr__(self):
+        return "<%s: %r>" % (type(self).__name__, self.connections)
+
+
+class DummyConnectionPool(ConnectionPool):
+    def __init__(self, connections, **kwargs):
+        if len(connections) != 1:
+            raise ImproperlyConfigured(
+                "DummyConnectionPool needs exactly one " "connection defined."
+            )
+        # we need connection opts for sniffing logic
+        self.connection_opts = connections
+        self.connection = connections[0][0]
+        self.connections = (self.connection,)
+
+    def get_connection(self):
+        return self.connection
+
+    def close(self):
+        """
+        Explicitly closes connections
+        """
+        self.connection.close()
+
+    def _noop(self, *args, **kwargs):
+        pass
+
+    mark_dead = mark_live = resurrect = _noop
+
+
+class EmptyConnectionPool(ConnectionPool):
+    """A connection pool that is empty. Errors out if used."""
+
+    def __init__(self, *_, **__):
+        self.connections = []
+        self.connection_opts = []
+
+    def get_connection(self):
+        raise ImproperlyConfigured("No connections were configured")
+
+    def _noop(self, *args, **kwargs):
+        pass
+
+    close = mark_dead = mark_live = resurrect = _noop
diff --git a/elasticsearch_7/exceptions.py b/elasticsearch_7/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..131b27f3789e34eebe084f1c8b576cd0cf3eb673
--- /dev/null
+++ b/elasticsearch_7/exceptions.py
@@ -0,0 +1,156 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+__all__ = [
+    "ImproperlyConfigured",
+    "ElasticsearchException",
+    "SerializationError",
+    "TransportError",
+    "NotFoundError",
+    "ConflictError",
+    "RequestError",
+    "ConnectionError",
+    "SSLError",
+    "ConnectionTimeout",
+    "AuthenticationException",
+    "AuthorizationException",
+]
+
+
+class ImproperlyConfigured(Exception):
+    """
+    Exception raised when the config passed to the client is inconsistent or invalid.
+    """
+
+
+class ElasticsearchException(Exception):
+    """
+    Base class for all exceptions raised by this package's operations (doesn't
+    apply to :class:`~elasticsearch.ImproperlyConfigured`).
+    """
+
+
+class SerializationError(ElasticsearchException):
+    """
+    Data passed in failed to serialize properly in the ``Serializer`` being
+    used.
+    """
+
+
+class TransportError(ElasticsearchException):
+    """
+    Exception raised when ES returns a non-OK (>=400) HTTP status code, or when
+    an actual connection error happens; in that case the ``status_code`` will
+    be set to ``'N/A'``.
+    """
+
+    @property
+    def status_code(self):
+        """
+        The HTTP status code of the response that precipitated the error or
+        ``'N/A'`` if not applicable.
+        """
+        return self.args[0]
+
+    @property
+    def error(self):
+        """ A string error message. """
+        return self.args[1]
+
+    @property
+    def info(self):
+        """
+        Dict of error info returned by ES where available, or the underlying
+        exception when not.
+        """
+        return self.args[2]
+
+    def __str__(self):
+        cause = ""
+        try:
+            if self.info and "error" in self.info:
+                if isinstance(self.info["error"], dict):
+                    root_cause = self.info["error"]["root_cause"][0]
+                    cause = ", ".join(
+                        filter(
+                            None,
+                            [
+                                repr(root_cause["reason"]),
+                                root_cause.get("resource.id"),
+                                root_cause.get("resource.type"),
+                            ],
+                        )
+                    )
+
+                else:
+                    cause = repr(self.info["error"])
+        except LookupError:
+            pass
+        msg = ", ".join(filter(None, [str(self.status_code), repr(self.error), cause]))
+        return "%s(%s)" % (self.__class__.__name__, msg)
+
+
+class ConnectionError(TransportError):
+    """
+    Error raised when there was an exception while talking to ES. Original
+    exception from the underlying :class:`~elasticsearch.Connection`
+    implementation is available as ``.info``.
+    """
+
+    def __str__(self):
+        return "ConnectionError(%s) caused by: %s(%s)" % (
+            self.error,
+            self.info.__class__.__name__,
+            self.info,
+        )
+
+
+class SSLError(ConnectionError):
+    """ Error raised when encountering SSL errors. """
+
+
+class ConnectionTimeout(ConnectionError):
+    """ A network timeout. Doesn't cause a node retry by default. """
+
+    def __str__(self):
+        return "ConnectionTimeout caused by - %s(%s)" % (
+            self.info.__class__.__name__,
+            self.info,
+        )
+
+
+class NotFoundError(TransportError):
+    """ Exception representing a 404 status code. """
+
+
+class ConflictError(TransportError):
+    """ Exception representing a 409 status code. """
+
+
+class RequestError(TransportError):
+    """ Exception representing a 400 status code. """
+
+
+class AuthenticationException(TransportError):
+    """ Exception representing a 401 status code. """
+
+
+class AuthorizationException(TransportError):
+    """ Exception representing a 403 status code. """
+
+
+class ElasticsearchDeprecationWarning(Warning):
+    """ Warning that is raised when a deprecated option
+    is flagged via the 'Warning' HTTP header.
+    """
+
+
+# more generic mappings from status_code to python exceptions
+HTTP_EXCEPTIONS = {
+    400: RequestError,
+    401: AuthenticationException,
+    403: AuthorizationException,
+    404: NotFoundError,
+    409: ConflictError,
+}
diff --git a/elasticsearch_7/helpers/__init__.py b/elasticsearch_7/helpers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9618882c39a8919a68231193412c8b4cf49c10c
--- /dev/null
+++ b/elasticsearch_7/helpers/__init__.py
@@ -0,0 +1,39 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+import sys
+from .errors import BulkIndexError, ScanError
+from .actions import expand_action, streaming_bulk, bulk, parallel_bulk
+from .actions import scan, reindex
+from .actions import _chunk_actions, _process_bulk_chunk
+
+__all__ = [
+    "BulkIndexError",
+    "ScanError",
+    "expand_action",
+    "streaming_bulk",
+    "bulk",
+    "parallel_bulk",
+    "scan",
+    "reindex",
+    "_chunk_actions",
+    "_process_bulk_chunk",
+]
+
+
+try:
+    # Asyncio only supported on Python 3.6+
+    if sys.version_info < (3, 6):
+        raise ImportError
+
+    from .._async.helpers import (
+        async_scan,
+        async_bulk,
+        async_reindex,
+        async_streaming_bulk,
+    )
+
+    __all__ += ["async_scan", "async_bulk", "async_reindex", "async_streaming_bulk"]
+except (ImportError, SyntaxError):
+    pass
diff --git a/elasticsearch_7/helpers/__pycache__/__init__.cpython-38.pyc b/elasticsearch_7/helpers/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..96a17b87a399acad2996d54d859c75c9dd74f452
Binary files /dev/null and b/elasticsearch_7/helpers/__pycache__/__init__.cpython-38.pyc differ
diff --git a/elasticsearch_7/helpers/__pycache__/actions.cpython-38.pyc b/elasticsearch_7/helpers/__pycache__/actions.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6c9f49267e6d4575e690d52300a02feb4a9c09f9
Binary files /dev/null and b/elasticsearch_7/helpers/__pycache__/actions.cpython-38.pyc differ
diff --git a/elasticsearch_7/helpers/__pycache__/errors.cpython-38.pyc b/elasticsearch_7/helpers/__pycache__/errors.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f13d453d1d5f5fadc519839ff98596b0f2fb8e7e
Binary files /dev/null and b/elasticsearch_7/helpers/__pycache__/errors.cpython-38.pyc differ
diff --git a/elasticsearch_7/helpers/__pycache__/test.cpython-38.pyc b/elasticsearch_7/helpers/__pycache__/test.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6f010e4567580c40b74f36fbb6c24cdc1886c008
Binary files /dev/null and b/elasticsearch_7/helpers/__pycache__/test.cpython-38.pyc differ
diff --git a/elasticsearch_7/helpers/actions.py b/elasticsearch_7/helpers/actions.py
new file mode 100644
index 0000000000000000000000000000000000000000..7202f077b880a82b8aa4311b4c029aad801593d2
--- /dev/null
+++ b/elasticsearch_7/helpers/actions.py
@@ -0,0 +1,593 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from operator import methodcaller
+import time
+
+from ..exceptions import TransportError
+from ..compat import map, string_types, Queue
+
+from .errors import ScanError, BulkIndexError
+
+import logging
+
+
+logger = logging.getLogger("elasticsearch.helpers")
+
+
+def expand_action(data):
+    """
+    From one document or action definition passed in by the user extract the
+    action/data lines needed for elasticsearch's
+    :meth:`~elasticsearch.Elasticsearch.bulk` api.
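+
+    For example (field names below are illustrative)::
+
+        expand_action({"_index": "my-index", "_id": 42, "title": "Hello"})
+        # -> ({"index": {"_id": 42, "_index": "my-index"}}, {"title": "Hello"})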
+    """
+    # when given a string, assume user wants to index raw json
+    if isinstance(data, string_types):
+        return '{"index":{}}', data
+
+    # make sure we don't alter the action
+    data = data.copy()
+    op_type = data.pop("_op_type", "index")
+    action = {op_type: {}}
+    for key in (
+        "_id",
+        "_index",
+        "_parent",
+        "_percolate",
+        "_retry_on_conflict",
+        "_routing",
+        "_timestamp",
+        "_type",
+        "_version",
+        "_version_type",
+        "parent",
+        "pipeline",
+        "retry_on_conflict",
+        "routing",
+        "version",
+        "version_type",
+    ):
+        if key in data:
+            if key in [
+                "_parent",
+                "_retry_on_conflict",
+                "_routing",
+                "_version",
+                "_version_type",
+            ]:
+                action[op_type][key[1:]] = data.pop(key)
+            else:
+                action[op_type][key] = data.pop(key)
+
+    # no data payload for delete
+    if op_type == "delete":
+        return action, None
+
+    return action, data.get("_source", data)
+
+
+class _ActionChunker:
+    def __init__(self, chunk_size, max_chunk_bytes, serializer):
+        self.chunk_size = chunk_size
+        self.max_chunk_bytes = max_chunk_bytes
+        self.serializer = serializer
+
+        self.size = 0
+        self.action_count = 0
+        self.bulk_actions = []
+        self.bulk_data = []
+
+    def feed(self, action, data):
+        ret = None
+        raw_data, raw_action = data, action
+        action = self.serializer.dumps(action)
+        # +1 to account for the trailing new line character
+        cur_size = len(action.encode("utf-8")) + 1
+
+        if data is not None:
+            data = self.serializer.dumps(data)
+            cur_size += len(data.encode("utf-8")) + 1
+
+        # full chunk, send it and start a new one
+        if self.bulk_actions and (
+            self.size + cur_size > self.max_chunk_bytes
+            or self.action_count == self.chunk_size
+        ):
+            ret = (self.bulk_data, self.bulk_actions)
+            self.bulk_actions, self.bulk_data = [], []
+            self.size, self.action_count = 0, 0
+
+        self.bulk_actions.append(action)
+        if data is not None:
+            self.bulk_actions.append(data)
+            self.bulk_data.append((raw_action, raw_data))
+        else:
+            self.bulk_data.append((raw_action,))
+
+        self.size += cur_size
+        self.action_count += 1
+        return ret
+
+    def flush(self):
+        ret = None
+        if self.bulk_actions:
+            ret = (self.bulk_data, self.bulk_actions)
+            self.bulk_actions, self.bulk_data = [], []
+        return ret
+
+
+def _chunk_actions(actions, chunk_size, max_chunk_bytes, serializer):
+    """
+    Split actions into chunks by number or size, serialize them into strings in
+    the process.
+    """
+    chunker = _ActionChunker(
+        chunk_size=chunk_size, max_chunk_bytes=max_chunk_bytes, serializer=serializer
+    )
+    for action, data in actions:
+        ret = chunker.feed(action, data)
+        if ret:
+            yield ret
+    ret = chunker.flush()
+    if ret:
+        yield ret
+
+
+def _process_bulk_chunk_success(resp, bulk_data, raise_on_error=True):
+    # if raise on error is set, we need to collect errors per chunk before raising them
+    errors = []
+
+    # go through request-response pairs and detect failures
+    for data, (op_type, item) in zip(
+        bulk_data, map(methodcaller("popitem"), resp["items"])
+    ):
+        ok = 200 <= item.get("status", 500) < 300
+        if not ok and raise_on_error:
+            # include original document source
+            if len(data) > 1:
+                item["data"] = data[1]
+            errors.append({op_type: item})
+
+        if ok or not errors:
+            # if we are not just recording all errors to be able to raise
+            # them all at once, yield items individually
+            yield ok, {op_type: item}
+
+    if errors:
+        raise BulkIndexError("%i document(s) failed to index." % len(errors), errors)
+
+
+def _process_bulk_chunk_error(
+    error, bulk_data, raise_on_exception=True, raise_on_error=True
+):
+    # default behavior - just propagate exception
+    if raise_on_exception:
+        raise error
+
+    # if we are not propagating, mark all actions in current chunk as failed
+    err_message = str(error)
+    exc_errors = []
+
+    for data in bulk_data:
+        # collect all the information about failed actions
+        op_type, action = data[0].copy().popitem()
+        info = {"error": err_message, "status": error.status_code, "exception": error}
+        if op_type != "delete":
+            info["data"] = data[1]
+        info.update(action)
+        exc_errors.append({op_type: info})
+
+    # emulate standard behavior for failed actions
+    if raise_on_error:
+        raise BulkIndexError(
+            "%i document(s) failed to index." % len(exc_errors), exc_errors
+        )
+    else:
+        for err in exc_errors:
+            yield False, err
+
+
+def _process_bulk_chunk(
+    client,
+    bulk_actions,
+    bulk_data,
+    raise_on_exception=True,
+    raise_on_error=True,
+    *args,
+    **kwargs
+):
+    """
+    Send a bulk request to elasticsearch and process the output.
+    """
+    try:
+        # send the actual request
+        resp = client.bulk("\n".join(bulk_actions) + "\n", *args, **kwargs)
+    except TransportError as e:
+        gen = _process_bulk_chunk_error(
+            error=e,
+            bulk_data=bulk_data,
+            raise_on_exception=raise_on_exception,
+            raise_on_error=raise_on_error,
+        )
+    else:
+        gen = _process_bulk_chunk_success(
+            resp=resp, bulk_data=bulk_data, raise_on_error=raise_on_error
+        )
+    for item in gen:
+        yield item
+
+
+def streaming_bulk(
+    client,
+    actions,
+    chunk_size=500,
+    max_chunk_bytes=100 * 1024 * 1024,
+    raise_on_error=True,
+    expand_action_callback=expand_action,
+    raise_on_exception=True,
+    max_retries=0,
+    initial_backoff=2,
+    max_backoff=600,
+    yield_ok=True,
+    *args,
+    **kwargs
+):
+
+    """
+    Streaming bulk consumes actions from the iterable passed in and yields
+    results per action. For non-streaming use cases use
+    :func:`~elasticsearch.helpers.bulk` which is a wrapper around streaming
+    bulk that returns summary information about the bulk operation once the
+    entire input is consumed and sent.
+
+    If you specify ``max_retries`` it will also retry any documents that were
+    rejected with a ``429`` status code. To do this it will wait (**by calling
+    time.sleep, which will block**) for ``initial_backoff`` seconds and then,
+    on every subsequent rejection of the same chunk, for double the previous
+    wait, up to ``max_backoff`` seconds.
+
+    :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
+    :arg actions: iterable containing the actions to be executed
+    :arg chunk_size: number of docs in one chunk sent to es (default: 500)
+    :arg max_chunk_bytes: the maximum size of the request in bytes (default: 100MB)
+    :arg raise_on_error: raise ``BulkIndexError`` containing errors (as `.errors`)
+        from the execution of the last chunk when some occur. By default we raise.
+    :arg raise_on_exception: if ``False`` then don't propagate exceptions from
+        call to ``bulk`` and just report the items that failed as failed.
+    :arg expand_action_callback: callback executed on each action passed in,
+        should return a tuple containing the action line and the data line
+        (`None` if data line should be omitted).
+    :arg max_retries: maximum number of times a document will be retried when
+        ``429`` is received, set to 0 (default) for no retries on ``429``
+    :arg initial_backoff: number of seconds we should wait before the first
+        retry. Each subsequent retry doubles the previous wait, i.e.
+        ``initial_backoff * 2 ** (retry_number - 1)`` seconds
+    :arg max_backoff: maximum number of seconds a retry will wait
+    :arg yield_ok: if set to False will skip successful documents in the output
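+
+    A minimal usage sketch, assuming ``es`` is an existing
+    :class:`~elasticsearch.Elasticsearch` client and the index name is
+    illustrative::
+
+        actions = ({"_index": "my-index", "value": i} for i in range(100))
+        for ok, item in streaming_bulk(es, actions, chunk_size=50):
+            if not ok:
+                print("failed:", item)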
+    """
+    actions = map(expand_action_callback, actions)
+
+    for bulk_data, bulk_actions in _chunk_actions(
+        actions, chunk_size, max_chunk_bytes, client.transport.serializer
+    ):
+
+        for attempt in range(max_retries + 1):
+            to_retry, to_retry_data = [], []
+            if attempt:
+                time.sleep(min(max_backoff, initial_backoff * 2 ** (attempt - 1)))
+
+            try:
+                for data, (ok, info) in zip(
+                    bulk_data,
+                    _process_bulk_chunk(
+                        client,
+                        bulk_actions,
+                        bulk_data,
+                        raise_on_exception,
+                        raise_on_error,
+                        *args,
+                        **kwargs
+                    ),
+                ):
+
+                    if not ok:
+                        action, info = info.popitem()
+                        # retry if retries enabled, we get 429, and we are not
+                        # in the last attempt
+                        if (
+                            max_retries
+                            and info["status"] == 429
+                            and (attempt + 1) <= max_retries
+                        ):
+                            # _process_bulk_chunk expects strings so we need to
+                            # re-serialize the data
+                            to_retry.extend(
+                                map(client.transport.serializer.dumps, data)
+                            )
+                            to_retry_data.append(data)
+                        else:
+                            yield ok, {action: info}
+                    elif yield_ok:
+                        yield ok, info
+
+            except TransportError as e:
+                # suppress 429 errors since we will retry them
+                if attempt == max_retries or e.status_code != 429:
+                    raise
+            else:
+                if not to_retry:
+                    break
+                # retry only subset of documents that didn't succeed
+                bulk_actions, bulk_data = to_retry, to_retry_data
+
+
+def bulk(client, actions, stats_only=False, *args, **kwargs):
+    """
+    Helper for the :meth:`~elasticsearch.Elasticsearch.bulk` api that provides
+    a more human friendly interface - it consumes an iterator of actions and
+    sends them to elasticsearch in chunks. It returns a tuple with summary
+    information - number of successfully executed actions and either list of
+    errors or number of errors if ``stats_only`` is set to ``True``. Note that
+    by default we raise a ``BulkIndexError`` when we encounter an error so
+    options like ``stats_only`` only apply when ``raise_on_error`` is set to
+    ``False``.
+
+    When errors are being collected, original document data is included in the
+    error dictionary, which can lead to high memory usage. If you need to
+    process a lot of data and want to ignore/collect errors, please consider
+    using the :func:`~elasticsearch.helpers.streaming_bulk` helper, which will
+    just return the errors and not store them in memory.
+
+
+    :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
+    :arg actions: iterator containing the actions
+    :arg stats_only: if `True` only report the number of successful/failed
+        operations instead of the number of successful operations and a list of
+        error responses
+
+    Any additional keyword arguments will be passed to
+    :func:`~elasticsearch.helpers.streaming_bulk` which is used to execute
+    the operation, see :func:`~elasticsearch.helpers.streaming_bulk` for more
+    accepted parameters.
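+
+    A minimal usage sketch, assuming ``es`` is an existing
+    :class:`~elasticsearch.Elasticsearch` client and the index name is
+    illustrative::
+
+        docs = ({"_index": "my-index", "value": i} for i in range(100))
+        success, errors = bulk(es, docs)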
+    """
+    success, failed = 0, 0
+
+    # list of errors to be collected if not stats_only
+    errors = []
+
+    # make streaming_bulk yield successful results so we can count them
+    kwargs["yield_ok"] = True
+    for ok, item in streaming_bulk(client, actions, *args, **kwargs):
+        # go through request-response pairs and detect failures
+        if not ok:
+            if not stats_only:
+                errors.append(item)
+            failed += 1
+        else:
+            success += 1
+
+    return success, failed if stats_only else errors
+
+
+def parallel_bulk(
+    client,
+    actions,
+    thread_count=4,
+    chunk_size=500,
+    max_chunk_bytes=100 * 1024 * 1024,
+    queue_size=4,
+    expand_action_callback=expand_action,
+    *args,
+    **kwargs
+):
+    """
+    Parallel version of the bulk helper, run in multiple threads at once.
+
+    :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
+    :arg actions: iterator containing the actions
+    :arg thread_count: size of the threadpool to use for the bulk requests
+    :arg chunk_size: number of docs in one chunk sent to es (default: 500)
+    :arg max_chunk_bytes: the maximum size of the request in bytes (default: 100MB)
+    :arg raise_on_error: raise ``BulkIndexError`` containing errors (as `.errors`)
+        from the execution of the last chunk when some occur. By default we raise.
+    :arg raise_on_exception: if ``False`` then don't propagate exceptions from
+        call to ``bulk`` and just report the items that failed as failed.
+    :arg expand_action_callback: callback executed on each action passed in,
+        should return a tuple containing the action line and the data line
+        (`None` if data line should be omitted).
+    :arg queue_size: size of the task queue between the main thread (producing
+        chunks to send) and the processing threads.
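+
+    A minimal usage sketch (``es`` and the documents are illustrative). Note
+    that ``parallel_bulk`` is a generator, so it must be consumed for any
+    requests to be sent::
+
+        docs = ({"_index": "my-index", "value": i} for i in range(1000))
+        for ok, item in parallel_bulk(es, docs, thread_count=4):
+            if not ok:
+                print("failed:", item)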
+    """
+    # Avoid importing multiprocessing unless parallel_bulk is used
+    # to avoid exceptions on restricted environments like App Engine
+    from multiprocessing.pool import ThreadPool
+
+    actions = map(expand_action_callback, actions)
+
+    class BlockingPool(ThreadPool):
+        def _setup_queues(self):
+            super(BlockingPool, self)._setup_queues()
+            # The queue must be at least the size of the number of threads to
+            # prevent hanging when inserting sentinel values during teardown.
+            self._inqueue = Queue(max(queue_size, thread_count))
+            self._quick_put = self._inqueue.put
+
+    pool = BlockingPool(thread_count)
+
+    try:
+        for result in pool.imap(
+            lambda bulk_chunk: list(
+                _process_bulk_chunk(
+                    client, bulk_chunk[1], bulk_chunk[0], *args, **kwargs
+                )
+            ),
+            _chunk_actions(
+                actions, chunk_size, max_chunk_bytes, client.transport.serializer
+            ),
+        ):
+            for item in result:
+                yield item
+
+    finally:
+        pool.close()
+        pool.join()
+
+
+def scan(
+    client,
+    query=None,
+    scroll="5m",
+    raise_on_error=True,
+    preserve_order=False,
+    size=1000,
+    request_timeout=None,
+    clear_scroll=True,
+    scroll_kwargs=None,
+    **kwargs
+):
+    """
+    Simple abstraction on top of the
+    :meth:`~elasticsearch.Elasticsearch.scroll` api - a simple iterator that
+    yields all hits as returned by underlying scroll requests.
+
+    By default scan does not return results in any pre-determined order. To
+    have a standard order in the returned documents (either by score or
+    explicit sort definition) when scrolling, use ``preserve_order=True``. This
+    may be an expensive operation and will negate the performance benefits of
+    using ``scan``.
+
+    :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
+    :arg query: body for the :meth:`~elasticsearch.Elasticsearch.search` api
+    :arg scroll: Specify how long a consistent view of the index should be
+        maintained for scrolled search
+    :arg raise_on_error: raises an exception (``ScanError``) if an error is
+        encountered (some shards fail to execute). By default we raise.
+    :arg preserve_order: don't set the ``search_type`` to ``scan`` - this will
+        cause the scroll to paginate while preserving the order. Note that this
+        can be an extremely expensive operation and can easily lead to
+        unpredictable results, use with caution.
+    :arg size: size (per shard) of the batch sent at each iteration.
+    :arg request_timeout: explicit timeout for each call to ``scan``
+    :arg clear_scroll: explicitly calls delete on the scroll id via the clear
+        scroll API at the end of the method on completion or error, defaults
+        to true.
+    :arg scroll_kwargs: additional kwargs to be passed to
+        :meth:`~elasticsearch.Elasticsearch.scroll`
+
+    Any additional keyword arguments will be passed to the initial
+    :meth:`~elasticsearch.Elasticsearch.search` call::
+
+        scan(es,
+            query={"query": {"match": {"title": "python"}}},
+            index="orders-*",
+            doc_type="books"
+        )
+
+    """
+    scroll_kwargs = scroll_kwargs or {}
+
+    if not preserve_order:
+        query = query.copy() if query else {}
+        query["sort"] = "_doc"
+
+    # initial search
+    resp = client.search(
+        body=query, scroll=scroll, size=size, request_timeout=request_timeout, **kwargs
+    )
+    scroll_id = resp.get("_scroll_id")
+
+    try:
+        while scroll_id and resp["hits"]["hits"]:
+            for hit in resp["hits"]["hits"]:
+                yield hit
+
+            # check if we have any errors
+            if (resp["_shards"]["successful"] + resp["_shards"]["skipped"]) < resp[
+                "_shards"
+            ]["total"]:
+                logger.warning(
+                    "Scroll request has only succeeded on %d (+%d skipped) shards out of %d.",
+                    resp["_shards"]["successful"],
+                    resp["_shards"]["skipped"],
+                    resp["_shards"]["total"],
+                )
+                if raise_on_error:
+                    raise ScanError(
+                        scroll_id,
+                        "Scroll request has only succeeded on %d (+%d skiped) shards out of %d."
+                        % (
+                            resp["_shards"]["successful"],
+                            resp["_shards"]["skipped"],
+                            resp["_shards"]["total"],
+                        ),
+                    )
+            resp = client.scroll(
+                body={"scroll_id": scroll_id, "scroll": scroll}, **scroll_kwargs
+            )
+            scroll_id = resp.get("_scroll_id")
+
+    finally:
+        if scroll_id and clear_scroll:
+            client.clear_scroll(body={"scroll_id": [scroll_id]}, ignore=(404,))
+
+
+def reindex(
+    client,
+    source_index,
+    target_index,
+    query=None,
+    target_client=None,
+    chunk_size=500,
+    scroll="5m",
+    scan_kwargs={},
+    bulk_kwargs={},
+):
+
+    """
+    Reindex all documents from one index that satisfy a given query
+    to another, potentially (if `target_client` is specified) on a different cluster.
+    If you don't specify the query you will reindex all the documents.
+
+    Since ``2.3`` a :meth:`~elasticsearch.Elasticsearch.reindex` api is
+    available as part of elasticsearch itself. It is recommended to use the api
+    instead of this helper wherever possible. The helper is here mostly for
+    backwards compatibility and for situations where more flexibility is
+    needed.
+
+    .. note::
+
+        This helper doesn't transfer mappings, just the data.
+
+    :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use (for
+        read if `target_client` is specified as well)
+    :arg source_index: index (or list of indices) to read documents from
+    :arg target_index: name of the index in the target cluster to populate
+    :arg query: body for the :meth:`~elasticsearch.Elasticsearch.search` api
+    :arg target_client: optional, if specified will be used for writing (thus
+        enabling reindex between clusters)
+    :arg chunk_size: number of docs in one chunk sent to es (default: 500)
+    :arg scroll: Specify how long a consistent view of the index should be
+        maintained for scrolled search
+    :arg scan_kwargs: additional kwargs to be passed to
+        :func:`~elasticsearch.helpers.scan`
+    :arg bulk_kwargs: additional kwargs to be passed to
+        :func:`~elasticsearch.helpers.bulk`
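+
+    A minimal usage sketch, assuming ``es`` is an existing client and the
+    index names are illustrative::
+
+        reindex(es, source_index="old-index", target_index="new-index")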
+    """
+    target_client = client if target_client is None else target_client
+    docs = scan(client, query=query, index=source_index, scroll=scroll, **scan_kwargs)
+
+    def _change_doc_index(hits, index):
+        for h in hits:
+            h["_index"] = index
+            if "fields" in h:
+                h.update(h.pop("fields"))
+            yield h
+
+    kwargs = {"stats_only": True}
+    kwargs.update(bulk_kwargs)
+    return bulk(
+        target_client,
+        _change_doc_index(docs, target_index),
+        chunk_size=chunk_size,
+        **kwargs
+    )
diff --git a/elasticsearch_7/helpers/errors.py b/elasticsearch_7/helpers/errors.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c06da2e639c69cec910abb3df9cf42fee6bf9d4
--- /dev/null
+++ b/elasticsearch_7/helpers/errors.py
@@ -0,0 +1,18 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from ..exceptions import ElasticsearchException
+
+
+class BulkIndexError(ElasticsearchException):
+    @property
+    def errors(self):
+        """ List of errors from execution of the last chunk. """
+        return self.args[1]
+
+
+class ScanError(ElasticsearchException):
+    def __init__(self, scroll_id, *args, **kwargs):
+        super(ScanError, self).__init__(*args, **kwargs)
+        self.scroll_id = scroll_id
diff --git a/elasticsearch_7/helpers/test.py b/elasticsearch_7/helpers/test.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe9e1da0e98494acfaed058af711b5a7ca74d066
--- /dev/null
+++ b/elasticsearch_7/helpers/test.py
@@ -0,0 +1,70 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+import time
+import os
+from unittest import TestCase, SkipTest
+
+from elasticsearch import Elasticsearch
+from elasticsearch.exceptions import ConnectionError
+
+
+def get_test_client(nowait=False, **kwargs):
+    # construct kwargs from the environment
+    kw = {"timeout": 30, "ca_certs": ".ci/certs/ca.pem"}
+
+    if "PYTHON_CONNECTION_CLASS" in os.environ:
+        from elasticsearch import connection
+
+        kw["connection_class"] = getattr(
+            connection, os.environ["PYTHON_CONNECTION_CLASS"]
+        )
+
+    kw.update(kwargs)
+    client = Elasticsearch([os.environ.get("ELASTICSEARCH_HOST", {})], **kw)
+
+    # wait for yellow status
+    for _ in range(1 if nowait else 100):
+        try:
+            client.cluster.health(wait_for_status="yellow")
+            return client
+        except ConnectionError:
+            time.sleep(0.1)
+    else:
+        # timeout
+        raise SkipTest("Elasticsearch failed to start.")
+
+
+def _get_version(version_string):
+    if "." not in version_string:
+        return ()
+    version = version_string.strip().split(".")
+    return tuple(int(v) if v.isdigit() else 999 for v in version)
+
+
+class ElasticsearchTestCase(TestCase):
+    @staticmethod
+    def _get_client():
+        return get_test_client()
+
+    @classmethod
+    def setup_class(cls):
+        cls.client = cls._get_client()
+
+    def teardown_method(self, _):
+        # Hidden indices expanded in wildcards in ES 7.7
+        expand_wildcards = ["open", "closed"]
+        if self.es_version() >= (7, 7):
+            expand_wildcards.append("hidden")
+
+        self.client.indices.delete(
+            index="*", ignore=404, expand_wildcards=expand_wildcards
+        )
+        self.client.indices.delete_template(name="*", ignore=404)
+
+    def es_version(self):
+        if not hasattr(self, "_es_version"):
+            version_string = self.client.info()["version"]["number"]
+            self._es_version = _get_version(version_string)
+        return self._es_version
diff --git a/elasticsearch_7/serializer.py b/elasticsearch_7/serializer.py
new file mode 100644
index 0000000000000000000000000000000000000000..50f98c0f04d3460a7e04ae216d241bc4946af8f9
--- /dev/null
+++ b/elasticsearch_7/serializer.py
@@ -0,0 +1,143 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+try:
+    import simplejson as json
+except ImportError:
+    import json
+
+import uuid
+from datetime import date, datetime
+from decimal import Decimal
+
+from .exceptions import SerializationError, ImproperlyConfigured
+from .compat import string_types
+
+INTEGER_TYPES = ()
+FLOAT_TYPES = (Decimal,)
+TIME_TYPES = (date, datetime)
+
+try:
+    import numpy as np
+
+    INTEGER_TYPES += (
+        np.int_,
+        np.intc,
+        np.int8,
+        np.int16,
+        np.int32,
+        np.int64,
+        np.uint8,
+        np.uint16,
+        np.uint32,
+        np.uint64,
+    )
+    FLOAT_TYPES += (
+        np.float_,
+        np.float16,
+        np.float32,
+        np.float64,
+    )
+except ImportError:
+    np = None
+
+try:
+    import pandas as pd
+
+    TIME_TYPES += (pd.Timestamp,)
+except ImportError:
+    pd = None
+
+
+class TextSerializer(object):
+    mimetype = "text/plain"
+
+    def loads(self, s):
+        return s
+
+    def dumps(self, data):
+        if isinstance(data, string_types):
+            return data
+
+        raise SerializationError("Cannot serialize %r into text." % data)
+
+
+class JSONSerializer(object):
+    mimetype = "application/json"
+
+    def default(self, data):
+        if isinstance(data, TIME_TYPES):
+            return data.isoformat()
+        elif isinstance(data, uuid.UUID):
+            return str(data)
+        elif isinstance(data, FLOAT_TYPES):
+            return float(data)
+        elif INTEGER_TYPES and isinstance(data, INTEGER_TYPES):
+            return int(data)
+
+        # Special cases for numpy and pandas types
+        elif np:
+            if isinstance(data, np.bool_):
+                return bool(data)
+            elif isinstance(data, np.datetime64):
+                return data.item().isoformat()
+            elif isinstance(data, np.ndarray):
+                return data.tolist()
+        if pd:
+            if isinstance(data, (pd.Series, pd.Categorical)):
+                return data.tolist()
+            elif hasattr(pd, "NA") and pd.isna(data):
+                return None
+
+        raise TypeError("Unable to serialize %r (type: %s)" % (data, type(data)))
+
+    def loads(self, s):
+        try:
+            return json.loads(s)
+        except (ValueError, TypeError) as e:
+            raise SerializationError(s, e)
+
+    def dumps(self, data):
+        # don't serialize strings
+        if isinstance(data, string_types):
+            return data
+
+        try:
+            return json.dumps(
+                data, default=self.default, ensure_ascii=False, separators=(",", ":")
+            )
+        except (ValueError, TypeError) as e:
+            raise SerializationError(data, e)
+
+
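+# A sketch of how the `default` hook above kicks in for non-JSON-native types
+# (the values are illustrative):
+#
+#     JSONSerializer().dumps({"at": datetime(2020, 1, 1)})
+#     # -> '{"at":"2020-01-01T00:00:00"}'
+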
+DEFAULT_SERIALIZERS = {
+    JSONSerializer.mimetype: JSONSerializer(),
+    TextSerializer.mimetype: TextSerializer(),
+}
+
+
+class Deserializer(object):
+    def __init__(self, serializers, default_mimetype="application/json"):
+        try:
+            self.default = serializers[default_mimetype]
+        except KeyError:
+            raise ImproperlyConfigured(
+                "Cannot find default serializer (%s)" % default_mimetype
+            )
+        self.serializers = serializers
+
+    def loads(self, s, mimetype=None):
+        if not mimetype:
+            deserializer = self.default
+        else:
+            # split out charset
+            mimetype, _, _ = mimetype.partition(";")
+            try:
+                deserializer = self.serializers[mimetype]
+            except KeyError:
+                raise SerializationError(
+                    "Unknown mimetype, unable to deserialize: %s" % mimetype
+                )
+
+        return deserializer.loads(s)
diff --git a/elasticsearch_7/transport.py b/elasticsearch_7/transport.py
new file mode 100644
index 0000000000000000000000000000000000000000..79e8d54058a6e09b041bd3ad5818ec7718d17dc9
--- /dev/null
+++ b/elasticsearch_7/transport.py
@@ -0,0 +1,428 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+import time
+from itertools import chain
+
+from .connection import Urllib3HttpConnection
+from .connection_pool import ConnectionPool, DummyConnectionPool, EmptyConnectionPool
+from .serializer import JSONSerializer, Deserializer, DEFAULT_SERIALIZERS
+from .exceptions import (
+    ConnectionError,
+    TransportError,
+    SerializationError,
+    ConnectionTimeout,
+)
+
+
+def get_host_info(node_info, host):
+    """
+    Simple callback that takes the node info from `/_cluster/nodes` and the
+    parsed connection information and returns the connection information. If
+    `None` is returned, the node is skipped.
+
+    Useful for filtering nodes (for example by proximity) or when additional
+    information needs to be provided for the :class:`~elasticsearch.Connection`
+    class. By default, master-only nodes are filtered out since they typically
+    shouldn't be used for API operations.
+
+    :arg node_info: node information from `/_cluster/nodes`
+    :arg host: connection information (host, port) extracted from the node info
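+
+    A sketch of a custom callback that, purely for illustration, keeps only
+    nodes whose ``roles`` include ``"data"``::
+
+        def only_data_nodes(node_info, host):
+            if "data" not in node_info.get("roles", []):
+                return None  # skip this node
+            return host
+
+        # Transport(hosts, host_info_callback=only_data_nodes)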
+    """
+    # ignore master only nodes
+    if node_info.get("roles", []) == ["master"]:
+        return None
+    return host
+
+
+class Transport(object):
+    """
+    Encapsulation of transport-related logic. Handles instantiation of the
+    individual connections as well as creating a connection pool to hold them.
+
+    The main interface is the `perform_request` method.
+    """
+
+    DEFAULT_CONNECTION_CLASS = Urllib3HttpConnection
+
+    def __init__(
+        self,
+        hosts,
+        connection_class=None,
+        connection_pool_class=ConnectionPool,
+        host_info_callback=get_host_info,
+        sniff_on_start=False,
+        sniffer_timeout=None,
+        sniff_timeout=0.1,
+        sniff_on_connection_fail=False,
+        serializer=JSONSerializer(),
+        serializers=None,
+        default_mimetype="application/json",
+        max_retries=3,
+        retry_on_status=(502, 503, 504),
+        retry_on_timeout=False,
+        send_get_body_as="GET",
+        **kwargs
+    ):
+        """
+        :arg hosts: list of dictionaries, each containing keyword arguments to
+            create a `connection_class` instance
+        :arg connection_class: subclass of :class:`~elasticsearch.Connection` to use
+        :arg connection_pool_class: subclass of :class:`~elasticsearch.ConnectionPool` to use
+        :arg host_info_callback: callback responsible for taking the node information from
+            `/_cluster/nodes`, along with the already extracted connection
+            information, and returning the connection options for that node
+            (same shape as a single `hosts` entry), or `None` to skip the node
+        :arg sniff_on_start: flag indicating whether to obtain a list of nodes
+            from the cluster at startup time
+        :arg sniffer_timeout: number of seconds between automatic sniffs
+        :arg sniff_on_connection_fail: flag controlling if connection failure triggers a sniff
+        :arg sniff_timeout: timeout used for the sniff request - it should be a
+            fast API call, and since we may be talking to several nodes we want
+            to fail quickly. Not used during initial sniffing (if
+            ``sniff_on_start`` is on) when the connections aren't yet
+            initialized.
+        :arg serializer: serializer instance
+        :arg serializers: optional dict of serializer instances that will be
+            used for deserializing data coming from the server. (key is the mimetype)
+        :arg default_mimetype: when no mimetype is specified by the server
+            response assume this mimetype, defaults to `'application/json'`
+        :arg max_retries: maximum number of retries before an exception is propagated
+        :arg retry_on_status: set of HTTP status codes on which we should retry
+            on a different node. Defaults to ``(502, 503, 504)``
+        :arg retry_on_timeout: should a timeout trigger a retry on a different
+            node? (default `False`)
+        :arg send_get_body_as: for GET requests with a body this option allows
+            you to specify an alternate way of execution for environments that
+            don't support passing bodies with GET requests. If you set this to
+            'POST', a POST method will be used instead; if set to 'source', the
+            body will be serialized and passed as the `source` query parameter.
+
+        Any extra keyword arguments will be passed to the `connection_class`
+        when creating an instance unless overridden by that connection's
+        options provided as part of the hosts parameter.
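+
+        A minimal usage sketch (the host values are placeholders and a
+        reachable node is assumed)::
+
+            from elasticsearch_7.transport import Transport
+
+            transport = Transport(
+                hosts=[{"host": "localhost", "port": 9200}],
+                max_retries=3,
+                retry_on_timeout=True,
+            )
+            info = transport.perform_request("GET", "/")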
+        """
+        if connection_class is None:
+            connection_class = self.DEFAULT_CONNECTION_CLASS
+
+        # serialization config
+        _serializers = DEFAULT_SERIALIZERS.copy()
+        # if a serializer has been specified, use it for deserialization as well
+        _serializers[serializer.mimetype] = serializer
+        # if custom serializers map has been supplied, override the defaults with it
+        if serializers:
+            _serializers.update(serializers)
+        # create a deserializer with our config
+        self.deserializer = Deserializer(_serializers, default_mimetype)
+
+        self.max_retries = max_retries
+        self.retry_on_timeout = retry_on_timeout
+        self.retry_on_status = retry_on_status
+        self.send_get_body_as = send_get_body_as
+
+        # data serializer
+        self.serializer = serializer
+
+        # store all strategies...
+        self.connection_pool_class = connection_pool_class
+        self.connection_class = connection_class
+
+        # ...save kwargs to be passed to the connections
+        self.kwargs = kwargs
+        self.hosts = hosts
+
+        # Start with an empty pool specifically for `AsyncTransport`.
+        # It should never be used and will be replaced on the first call
+        # to .set_connections()
+        self.connection_pool = EmptyConnectionPool()
+
+        if hosts:
+            # ...and instantiate them
+            self.set_connections(hosts)
+            # retain the original connection instances for sniffing
+            self.seed_connections = list(self.connection_pool.connections[:])
+        else:
+            self.seed_connections = []
+
+        # Don't enable sniffing on Cloud instances.
+        if kwargs.get("cloud_id", False):
+            sniff_on_start = False
+            sniff_on_connection_fail = False
+
+        # sniffing data
+        self.sniffer_timeout = sniffer_timeout
+        self.sniff_on_start = sniff_on_start
+        self.sniff_on_connection_fail = sniff_on_connection_fail
+        self.last_sniff = time.time()
+        self.sniff_timeout = sniff_timeout
+
+        # callback to construct host dict from data in /_cluster/nodes
+        self.host_info_callback = host_info_callback
+
+        if sniff_on_start:
+            self.sniff_hosts(True)
+
+    def add_connection(self, host):
+        """
+        Create a new :class:`~elasticsearch.Connection` instance and add it to the pool.
+
+        :arg host: kwargs that will be used to create the instance
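+
+        For example (the address is a placeholder)::
+
+            transport.add_connection({"host": "10.0.0.2", "port": 9200})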
+        """
+        self.hosts.append(host)
+        self.set_connections(self.hosts)
+
+    def set_connections(self, hosts):
+        """
+        Instantiate all the connections and create a new connection pool to hold them.
+        Tries to identify unchanged hosts and re-use existing
+        :class:`~elasticsearch.Connection` instances.
+
+        :arg hosts: same as `__init__`
+        """
+        # construct the connections
+        def _create_connection(host):
+            # if this is not the initial setup, look at the existing connection
+            # options and identify connections that haven't changed and can be
+            # kept around.
+            if hasattr(self, "connection_pool"):
+                for (connection, old_host) in self.connection_pool.connection_opts:
+                    if old_host == host:
+                        return connection
+
+            # previously unseen params, create new connection
+            kwargs = self.kwargs.copy()
+            kwargs.update(host)
+            return self.connection_class(**kwargs)
+
+        connections = map(_create_connection, hosts)
+
+        connections = list(zip(connections, hosts))
+        if len(connections) == 1:
+            self.connection_pool = DummyConnectionPool(connections)
+        else:
+            # pass the hosts dicts to the connection pool to optionally extract parameters from
+            self.connection_pool = self.connection_pool_class(
+                connections, **self.kwargs
+            )
+
+    def get_connection(self):
+        """
+        Retrieve a :class:`~elasticsearch.Connection` instance from the
+        :class:`~elasticsearch.ConnectionPool` instance.
+        """
+        if self.sniffer_timeout:
+            if time.time() >= self.last_sniff + self.sniffer_timeout:
+                self.sniff_hosts()
+        return self.connection_pool.get_connection()
+
+    def _get_sniff_data(self, initial=False):
+        """
+        Perform the request to get sniffing information. Returns a list of
+        dictionaries (one per node) containing all the information from the
+        cluster.
+
+        It also sets the last_sniff attribute in case of a successful attempt.
+
+        In rare cases it may make sense to override this method in a custom
+        Transport class to serve the data from an alternative source, such as
+        configuration management.
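+
+        A sketch of such an override (``STATIC_NODES`` stands in for a locally
+        maintained list of node dicts shaped like the entries of the
+        ``/_nodes/_all/http`` response)::
+
+            import time
+
+            class StaticSniffTransport(Transport):
+                def _get_sniff_data(self, initial=False):
+                    self.last_sniff = time.time()
+                    return STATIC_NODES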
+        """
+        previous_sniff = self.last_sniff
+
+        try:
+            # reset last_sniff timestamp
+            self.last_sniff = time.time()
+            # go through all current connections as well as the
+            # seed_connections for good measure
+            for c in chain(self.connection_pool.connections, self.seed_connections):
+                try:
+                    # use small timeout for the sniffing request, should be a fast api call
+                    _, headers, node_info = c.perform_request(
+                        "GET",
+                        "/_nodes/_all/http",
+                        timeout=self.sniff_timeout if not initial else None,
+                    )
+                    node_info = self.deserializer.loads(
+                        node_info, headers.get("content-type")
+                    )
+                    break
+                except (ConnectionError, SerializationError):
+                    pass
+            else:
+                raise TransportError("N/A", "Unable to sniff hosts.")
+        except Exception:
+            # keep the previous value on error
+            self.last_sniff = previous_sniff
+            raise
+
+        return list(node_info["nodes"].values())
+
+    def _get_host_info(self, host_info):
+        host = {}
+        address = host_info.get("http", {}).get("publish_address")
+
+        # malformed or no address given
+        if not address or ":" not in address:
+            return None
+
+        if "/" in address:
+            # Support 7.x host/ip:port behavior where http.publish_host has been set.
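+            # e.g. an illustrative "node-1.example.com/10.0.0.3:9200" yields
+            # host "node-1.example.com" and port 9200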
+            fqdn, ipaddress = address.split("/", 1)
+            host["host"] = fqdn
+            _, host["port"] = ipaddress.rsplit(":", 1)
+            host["port"] = int(host["port"])
+
+        else:
+            host["host"], host["port"] = address.rsplit(":", 1)
+            host["port"] = int(host["port"])
+
+        return self.host_info_callback(host_info, host)
+
+    def sniff_hosts(self, initial=False):
+        """
+        Obtain a list of nodes from the cluster and create a new connection
+        pool using the information retrieved.
+
+        To extract the node connection parameters, use the ``host_info_callback``.
+
+        :arg initial: flag indicating if this is during startup
+            (``sniff_on_start``), ignore the ``sniff_timeout`` if ``True``
+        """
+        node_info = self._get_sniff_data(initial)
+
+        hosts = list(filter(None, (self._get_host_info(n) for n in node_info)))
+
+        # we weren't able to get any nodes or the host_info_callback blocked
+        # them all - raise an error.
+        if not hosts:
+            raise TransportError(
+                "N/A", "Unable to sniff hosts - no viable hosts found."
+            )
+
+        self.set_connections(hosts)
+
+    def mark_dead(self, connection):
+        """
+        Mark a connection as dead (failed) in the connection pool. If sniffing
+        on failure is enabled this will initiate the sniffing process.
+
+        :arg connection: instance of :class:`~elasticsearch.Connection` that failed
+        """
+        # mark as dead even when sniffing to avoid hitting this host during the sniff process
+        self.connection_pool.mark_dead(connection)
+        if self.sniff_on_connection_fail:
+            self.sniff_hosts()
+
+    def perform_request(self, method, url, headers=None, params=None, body=None):
+        """
+        Perform the actual request. Retrieve a connection from the connection
+        pool, pass all the information to its perform_request method and
+        return the data.
+
+        If an exception was raised, mark the connection as failed and retry (up
+        to `max_retries` times).
+
+        If the operation was successful and the connection used was previously
+        marked as dead, mark it as live, resetting its failure count.
+
+        :arg method: HTTP method to use
+        :arg url: absolute url (without host) to target
+        :arg headers: dictionary of headers, will be handed over to the
+            underlying :class:`~elasticsearch.Connection` class
+        :arg params: dictionary of query parameters, will be handed over to the
+            underlying :class:`~elasticsearch.Connection` class for serialization
+        :arg body: body of the request, will be serialized using serializer and
+            passed to the connection
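+
+        A minimal sketch of a call (the index name and body are illustrative)::
+
+            result = transport.perform_request(
+                "POST",
+                "/my-index/_doc",
+                body={"field": "value"},
+                params={"request_timeout": 10, "ignore": 409},
+            )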
+        """
+        method, params, body, ignore, timeout = self._resolve_request_args(
+            method, params, body
+        )
+
+        for attempt in range(self.max_retries + 1):
+            connection = self.get_connection()
+
+            try:
+                status, headers_response, data = connection.perform_request(
+                    method,
+                    url,
+                    params,
+                    body,
+                    headers=headers,
+                    ignore=ignore,
+                    timeout=timeout,
+                )
+
+            except TransportError as e:
+                if method == "HEAD" and e.status_code == 404:
+                    return False
+
+                retry = False
+                if isinstance(e, ConnectionTimeout):
+                    retry = self.retry_on_timeout
+                elif isinstance(e, ConnectionError):
+                    retry = True
+                elif e.status_code in self.retry_on_status:
+                    retry = True
+
+                if retry:
+                    # only mark as dead if we are retrying
+                    self.mark_dead(connection)
+                    # raise exception on last retry
+                    if attempt == self.max_retries:
+                        raise
+                else:
+                    raise
+
+            else:
+                # connection didn't fail, confirm its live status
+                self.connection_pool.mark_live(connection)
+
+                if method == "HEAD":
+                    return 200 <= status < 300
+
+                if data:
+                    data = self.deserializer.loads(
+                        data, headers_response.get("content-type")
+                    )
+                return data
+
+    def close(self):
+        """
+        Explicitly closes connections
+        """
+        self.connection_pool.close()
+
+    def _resolve_request_args(self, method, params, body):
+        """Resolves parameters for .perform_request()"""
+        if body is not None:
+            body = self.serializer.dumps(body)
+
+            # some clients or environments don't support sending GET with body
+            if method in ("HEAD", "GET") and self.send_get_body_as != "GET":
+                # send it as post instead
+                if self.send_get_body_as == "POST":
+                    method = "POST"
+
+                # or as source parameter
+                elif self.send_get_body_as == "source":
+                    if params is None:
+                        params = {}
+                    params["source"] = body
+                    body = None
+
+        if body is not None:
+            try:
+                body = body.encode("utf-8", "surrogatepass")
+            except (UnicodeDecodeError, AttributeError):
+                # bytes/str - no need to re-encode
+                pass
+
+        ignore = ()
+        timeout = None
+        if params:
+            timeout = params.pop("request_timeout", None)
+            ignore = params.pop("ignore", ())
+            if isinstance(ignore, int):
+                ignore = (ignore,)
+
+        return method, params, body, ignore, timeout