diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 40097487..057c6084 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -225,21 +225,21 @@ jobs: matrix: include: - python: 3.9 - kafka: "2.4.0" - scala: "2.12" + kafka: "2.8.1" + scala: "2.13" # Older python versions against latest broker - python: 3.6 - kafka: "2.4.0" - scala: "2.12" + kafka: "2.8.1" + scala: "2.13" - python: 3.7 - kafka: "2.4.0" - scala: "2.12" + kafka: "2.8.1" + scala: "2.13" - python: 3.8 - kafka: "2.4.0" - scala: "2.12" + kafka: "2.8.1" + scala: "2.13" - # Older brokers against latest python version + # Older/newer brokers against latest python version - python: 3.9 kafka: "0.9.0.1" scala: "2.11" @@ -261,6 +261,18 @@ jobs: - python: 3.9 kafka: "2.3.1" scala: "2.12" + - python: 3.9 + kafka: "2.4.1" + scala: "2.12" + - python: 3.9 + kafka: "2.5.1" + scala: "2.12" + - python: 3.9 + kafka: "2.6.3" + scala: "2.12" + - python: 3.9 + kafka: "2.7.2" + scala: "2.13" fail-fast: false steps: diff --git a/.gitignore b/.gitignore index 79e30457..58c869b2 100644 --- a/.gitignore +++ b/.gitignore @@ -72,3 +72,6 @@ aiokafka/record/_crecords/legacy_records.c aiokafka/record/_crecords/memory_records.c aiokafka/record/_crecords/cutil.c aiokafka/record/_crecords/*.html + +# pyenv +.python-version diff --git a/CHANGES/731.doc b/CHANGES/731.doc new file mode 100644 index 00000000..e312e171 --- /dev/null +++ b/CHANGES/731.doc @@ -0,0 +1 @@ +fix MyRebalancer on docs/consumer.rst diff --git a/Makefile b/Makefile index fdedf30b..ba4e4211 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,8 @@ # Some simple testing tasks (sorry, UNIX only). FLAGS?=--maxfail=3 -SCALA_VERSION?=2.12 -KAFKA_VERSION?=2.2.2 +SCALA_VERSION?=2.13 +KAFKA_VERSION?=2.8.1 DOCKER_IMAGE=aiolibs/kafka:$(SCALA_VERSION)_$(KAFKA_VERSION) DIFF_BRANCH=origin/master FORMATTED_AREAS=aiokafka/util.py aiokafka/structs.py diff --git a/aiokafka/client.py b/aiokafka/client.py index f2eb3ee8..c1bdf6d1 100644 --- a/aiokafka/client.py +++ b/aiokafka/client.py @@ -4,16 +4,18 @@ import time from kafka.conn import collect_hosts -from kafka.protocol.metadata import MetadataRequest +from kafka.protocol.admin import DescribeAclsRequest_v2 from kafka.protocol.commit import OffsetFetchRequest from kafka.protocol.fetch import FetchRequest +from kafka.protocol.metadata import MetadataRequest +from kafka.protocol.offset import OffsetRequest +from kafka.protocol.produce import ProduceRequest import aiokafka.errors as Errors from aiokafka import __version__ from aiokafka.conn import create_conn, CloseReason from aiokafka.cluster import ClusterMetadata from aiokafka.protocol.coordination import FindCoordinatorRequest -from aiokafka.protocol.produce import ProduceRequest from aiokafka.errors import ( KafkaError, KafkaConnectionError, @@ -581,13 +583,19 @@ def _check_api_version_response(self, response): # in descending order. 
As soon as we find one that works, return it test_cases = [ # format (, ) - ((2, 3, 0), FetchRequest[0].API_KEY, 11), - ((2, 1, 0), MetadataRequest[0].API_KEY, 7), - ((1, 1, 0), FetchRequest[0].API_KEY, 7), - ((1, 0, 0), MetadataRequest[0].API_KEY, 5), - ((0, 11, 0), MetadataRequest[0].API_KEY, 4), - ((0, 10, 2), OffsetFetchRequest[0].API_KEY, 2), - ((0, 10, 1), MetadataRequest[0].API_KEY, 2), + # TODO Requires unreleased version of python-kafka + # ((2, 6, 0), DescribeClientQuotasRequest[0]), + ((2, 5, 0), DescribeAclsRequest_v2), + ((2, 4, 0), ProduceRequest[8]), + ((2, 3, 0), FetchRequest[11]), + ((2, 2, 0), OffsetRequest[5]), + ((2, 1, 0), FetchRequest[10]), + ((2, 0, 0), FetchRequest[8]), + ((1, 1, 0), FetchRequest[7]), + ((1, 0, 0), MetadataRequest[5]), + ((0, 11, 0), MetadataRequest[4]), + ((0, 10, 2), OffsetFetchRequest[2]), + ((0, 10, 1), MetadataRequest[2]), ] error_type = Errors.for_code(response.error_code) @@ -597,8 +605,8 @@ def _check_api_version_response(self, response): for api_key, _, max_version in response.api_versions } # Get the best match of test cases - for broker_version, api_key, version in test_cases: - if max_versions.get(api_key, -1) >= version: + for broker_version, struct in test_cases: + if max_versions.get(struct.API_KEY, -1) >= struct.API_VERSION: return broker_version # We know that ApiVersionResponse is only supported in 0.10+ diff --git a/aiokafka/conn.py b/aiokafka/conn.py index b8c5a788..bdc84e53 100644 --- a/aiokafka/conn.py +++ b/aiokafka/conn.py @@ -227,11 +227,15 @@ async def connect(self): self._idle_handle = loop.call_soon( self._idle_check, weakref.ref(self)) - if self._version_hint and self._version_hint >= (0, 10): - await self._do_version_lookup() + try: + if self._version_hint and self._version_hint >= (0, 10): + await self._do_version_lookup() - if self._security_protocol in ["SASL_SSL", "SASL_PLAINTEXT"]: - await self._do_sasl_handshake() + if self._security_protocol in ["SASL_SSL", "SASL_PLAINTEXT"]: + await self._do_sasl_handshake() + except: # noqa: E722 + self.close() + raise return reader, writer diff --git a/aiokafka/consumer/fetcher.py b/aiokafka/consumer/fetcher.py index 6c38b026..9db37129 100644 --- a/aiokafka/consumer/fetcher.py +++ b/aiokafka/consumer/fetcher.py @@ -7,8 +7,8 @@ import async_timeout from kafka.protocol.offset import OffsetRequest +from kafka.protocol.fetch import FetchRequest -from aiokafka.protocol.fetch import FetchRequest import aiokafka.errors as Errors from aiokafka.errors import ( ConsumerStoppedError, RecordTooLargeError, KafkaTimeoutError) diff --git a/aiokafka/producer/sender.py b/aiokafka/producer/sender.py index bc6c8f4e..b8faf1f6 100644 --- a/aiokafka/producer/sender.py +++ b/aiokafka/producer/sender.py @@ -3,6 +3,8 @@ import logging import time +from kafka.protocol.produce import ProduceRequest + import aiokafka.errors as Errors from aiokafka.client import ConnectionGroup, CoordinationType from aiokafka.errors import ( @@ -14,7 +16,6 @@ OutOfOrderSequenceNumber, TopicAuthorizationFailedError, GroupAuthorizationFailedError, TransactionalIdAuthorizationFailed, OperationNotAttempted) -from aiokafka.protocol.produce import ProduceRequest from aiokafka.protocol.transaction import ( InitProducerIdRequest, AddPartitionsToTxnRequest, EndTxnRequest, AddOffsetsToTxnRequest, TxnOffsetCommitRequest diff --git a/aiokafka/protocol/fetch.py b/aiokafka/protocol/fetch.py deleted file mode 100644 index 6407b6eb..00000000 --- a/aiokafka/protocol/fetch.py +++ /dev/null @@ -1,212 +0,0 @@ -from kafka.protocol.api import 
Request, Response -from kafka.protocol.types import ( - Array, Int8, Int16, Int32, Int64, Schema, String, Bytes -) - - -class FetchResponse_v0(Response): - API_KEY = 1 - API_VERSION = 0 - SCHEMA = Schema( - ('topics', Array( - ('topics', String('utf-8')), - ('partitions', Array( - ('partition', Int32), - ('error_code', Int16), - ('highwater_offset', Int64), - ('message_set', Bytes))))) - ) - - -class FetchResponse_v1(Response): - API_KEY = 1 - API_VERSION = 1 - SCHEMA = Schema( - ('throttle_time_ms', Int32), - ('topics', Array( - ('topics', String('utf-8')), - ('partitions', Array( - ('partition', Int32), - ('error_code', Int16), - ('highwater_offset', Int64), - ('message_set', Bytes))))) - ) - - -class FetchResponse_v2(Response): - API_KEY = 1 - API_VERSION = 2 - SCHEMA = FetchResponse_v1.SCHEMA # message format changed internally - - -class FetchResponse_v3(Response): - API_KEY = 1 - API_VERSION = 3 - SCHEMA = FetchResponse_v2.SCHEMA - - -class FetchResponse_v4(Response): - API_KEY = 1 - API_VERSION = 4 - SCHEMA = Schema( - ('throttle_time_ms', Int32), - ('topics', Array( - ('topics', String('utf-8')), - ('partitions', Array( - ('partition', Int32), - ('error_code', Int16), - ('highwater_offset', Int64), - ('last_stable_offset', Int64), - ('aborted_transactions', Array( - ('producer_id', Int64), - ('first_offset', Int64))), - ('message_set', Bytes))))) - ) - - -class FetchResponse_v5(Response): - API_KEY = 1 - API_VERSION = 5 - SCHEMA = Schema( - ('throttle_time_ms', Int32), - ('topics', Array( - ('topics', String('utf-8')), - ('partitions', Array( - ('partition', Int32), - ('error_code', Int16), - ('highwater_offset', Int64), - ('last_stable_offset', Int64), - ('log_start_offset', Int64), - ('aborted_transactions', Array( - ('producer_id', Int64), - ('first_offset', Int64))), - ('message_set', Bytes))))) - ) - - -class FetchResponse_v6(Response): - """ - Same as FetchResponse_v5. The version number is bumped up to indicate that - the client supports KafkaStorageException. 
- The KafkaStorageException will be translated to - NotLeaderForPartitionException in the response if version <= 5 - """ - API_KEY = 1 - API_VERSION = 6 - SCHEMA = FetchResponse_v5.SCHEMA - - -class FetchRequest_v0(Request): - API_KEY = 1 - API_VERSION = 0 - RESPONSE_TYPE = FetchResponse_v0 - SCHEMA = Schema( - ('replica_id', Int32), - ('max_wait_time', Int32), - ('min_bytes', Int32), - ('topics', Array( - ('topic', String('utf-8')), - ('partitions', Array( - ('partition', Int32), - ('offset', Int64), - ('max_bytes', Int32))))) - ) - - -class FetchRequest_v1(Request): - API_KEY = 1 - API_VERSION = 1 - RESPONSE_TYPE = FetchResponse_v1 - SCHEMA = FetchRequest_v0.SCHEMA - - -class FetchRequest_v2(Request): - API_KEY = 1 - API_VERSION = 2 - RESPONSE_TYPE = FetchResponse_v2 - SCHEMA = FetchRequest_v1.SCHEMA - - -class FetchRequest_v3(Request): - API_KEY = 1 - API_VERSION = 3 - RESPONSE_TYPE = FetchResponse_v3 - SCHEMA = Schema( - ('replica_id', Int32), - ('max_wait_time', Int32), - ('min_bytes', Int32), - ('max_bytes', Int32), # This new field is only difference from FR_v2 - ('topics', Array( - ('topic', String('utf-8')), - ('partitions', Array( - ('partition', Int32), - ('offset', Int64), - ('max_bytes', Int32))))) - ) - - -class FetchRequest_v4(Request): - # Adds isolation_level field - API_KEY = 1 - API_VERSION = 4 - RESPONSE_TYPE = FetchResponse_v4 - SCHEMA = Schema( - ('replica_id', Int32), - ('max_wait_time', Int32), - ('min_bytes', Int32), - ('max_bytes', Int32), - ('isolation_level', Int8), - ('topics', Array( - ('topic', String('utf-8')), - ('partitions', Array( - ('partition', Int32), - ('offset', Int64), - ('max_bytes', Int32))))) - ) - - -class FetchRequest_v5(Request): - # This may only be used in broker-broker api calls - API_KEY = 1 - API_VERSION = 5 - RESPONSE_TYPE = FetchResponse_v5 - SCHEMA = Schema( - ('replica_id', Int32), - ('max_wait_time', Int32), - ('min_bytes', Int32), - ('max_bytes', Int32), - ('isolation_level', Int8), - ('topics', Array( - ('topic', String('utf-8')), - ('partitions', Array( - ('partition', Int32), - ('fetch_offset', Int64), - ('log_start_offset', Int64), - ('max_bytes', Int32))))) - ) - - -class FetchRequest_v6(Request): - """ - The body of FETCH_REQUEST_V6 is the same as FETCH_REQUEST_V5. - The version number is bumped up to indicate that the client supports - KafkaStorageException. 
- The KafkaStorageException will be translated to - NotLeaderForPartitionException in the response if version <= 5 - """ - API_KEY = 1 - API_VERSION = 6 - RESPONSE_TYPE = FetchResponse_v6 - SCHEMA = FetchRequest_v5.SCHEMA - - -FetchRequest = [ - FetchRequest_v0, FetchRequest_v1, FetchRequest_v2, - FetchRequest_v3, FetchRequest_v4, FetchRequest_v5, - FetchRequest_v6 -] -FetchResponse = [ - FetchResponse_v0, FetchResponse_v1, FetchResponse_v2, - FetchResponse_v3, FetchResponse_v4, FetchResponse_v5, - FetchResponse_v6 -] diff --git a/aiokafka/protocol/produce.py b/aiokafka/protocol/produce.py deleted file mode 100644 index 3d3725ba..00000000 --- a/aiokafka/protocol/produce.py +++ /dev/null @@ -1,238 +0,0 @@ -from kafka.protocol.api import Request, Response -from kafka.protocol.types import ( - Int16, Int32, Int64, String, Array, Schema, Bytes -) - - -class ProduceResponse_v0(Response): - API_KEY = 0 - API_VERSION = 0 - SCHEMA = Schema( - ('topics', Array( - ('topic', String('utf-8')), - ('partitions', Array( - ('partition', Int32), - ('error_code', Int16), - ('offset', Int64))))) - ) - - -class ProduceResponse_v1(Response): - API_KEY = 0 - API_VERSION = 1 - SCHEMA = Schema( - ('topics', Array( - ('topic', String('utf-8')), - ('partitions', Array( - ('partition', Int32), - ('error_code', Int16), - ('offset', Int64))))), - ('throttle_time_ms', Int32) - ) - - -class ProduceResponse_v2(Response): - API_KEY = 0 - API_VERSION = 2 - SCHEMA = Schema( - ('topics', Array( - ('topic', String('utf-8')), - ('partitions', Array( - ('partition', Int32), - ('error_code', Int16), - ('offset', Int64), - ('timestamp', Int64))))), - ('throttle_time_ms', Int32) - ) - - -class ProduceResponse_v3(Response): - API_KEY = 0 - API_VERSION = 3 - SCHEMA = ProduceResponse_v2.SCHEMA - - -class ProduceResponse_v4(Response): - """ - The version number is bumped up to indicate that the client supports - KafkaStorageException. - The KafkaStorageException will be translated to - NotLeaderForPartitionException in the response if version <= 3 - """ - API_KEY = 0 - API_VERSION = 4 - SCHEMA = ProduceResponse_v3.SCHEMA - - -class ProduceResponse_v5(Response): - API_KEY = 0 - API_VERSION = 5 - SCHEMA = Schema( - ('topics', Array( - ('topic', String('utf-8')), - ('partitions', Array( - ('partition', Int32), - ('error_code', Int16), - ('offset', Int64), - ('timestamp', Int64), - ('log_start_offset', Int64))))), - ('throttle_time_ms', Int32) - ) - - -class ProduceResponse_v6(Response): - """ - The version number is bumped to indicate that on quota violation brokers - send out responses before throttling. - """ - API_KEY = 0 - API_VERSION = 6 - SCHEMA = ProduceResponse_v5.SCHEMA - - -class ProduceResponse_v7(Response): - """ - V7 bumped up to indicate ZStandard capability. 
(see KIP-110) - """ - API_KEY = 0 - API_VERSION = 7 - SCHEMA = ProduceResponse_v6.SCHEMA - - -class ProduceResponse_v8(Response): - """ - V8 bumped up to add two new fields record_errors offset list and error_message - (See KIP-467) - """ - API_KEY = 0 - API_VERSION = 8 - SCHEMA = Schema( - ('topics', Array( - ('topic', String('utf-8')), - ('partitions', Array( - ('partition', Int32), - ('error_code', Int16), - ('offset', Int64), - ('timestamp', Int64), - ('log_start_offset', Int64)), - ('record_errors', (Array( - ('batch_index', Int32), - ('batch_index_error_message', String('utf-8')) - ))), - ('error_message', String('utf-8')) - ))), - ('throttle_time_ms', Int32) - ) - - -class ProduceRequest(Request): - API_KEY = 0 - - def expect_response(self): - if self.required_acks == 0: # pylint: disable=no-member - return False - return True - - -class ProduceRequest_v0(ProduceRequest): - API_VERSION = 0 - RESPONSE_TYPE = ProduceResponse_v0 - SCHEMA = Schema( - ('required_acks', Int16), - ('timeout', Int32), - ('topics', Array( - ('topic', String('utf-8')), - ('partitions', Array( - ('partition', Int32), - ('messages', Bytes))))) - ) - - -class ProduceRequest_v1(ProduceRequest): - API_VERSION = 1 - RESPONSE_TYPE = ProduceResponse_v1 - SCHEMA = ProduceRequest_v0.SCHEMA - - -class ProduceRequest_v2(ProduceRequest): - API_VERSION = 2 - RESPONSE_TYPE = ProduceResponse_v2 - SCHEMA = ProduceRequest_v1.SCHEMA - - -class ProduceRequest_v3(ProduceRequest): - API_VERSION = 3 - RESPONSE_TYPE = ProduceResponse_v3 - SCHEMA = Schema( - ('transactional_id', String('utf-8')), - ('required_acks', Int16), - ('timeout', Int32), - ('topics', Array( - ('topic', String('utf-8')), - ('partitions', Array( - ('partition', Int32), - ('messages', Bytes))))) - ) - - -class ProduceRequest_v4(ProduceRequest): - """ - The version number is bumped up to indicate that the client supports - KafkaStorageException. - The KafkaStorageException will be translated to - NotLeaderForPartitionException in the response if version <= 3 - """ - API_VERSION = 4 - RESPONSE_TYPE = ProduceResponse_v4 - SCHEMA = ProduceRequest_v3.SCHEMA - - -class ProduceRequest_v5(ProduceRequest): - """ - Same as v4. The version number is bumped since the v5 response includes an - additional partition level field: the log_start_offset. - """ - API_VERSION = 5 - RESPONSE_TYPE = ProduceResponse_v5 - SCHEMA = ProduceRequest_v4.SCHEMA - - -class ProduceRequest_v6(ProduceRequest): - """ - The version number is bumped to indicate that on quota violation brokers - send out responses before throttling. - """ - API_VERSION = 6 - RESPONSE_TYPE = ProduceResponse_v6 - SCHEMA = ProduceRequest_v5.SCHEMA - - -class ProduceRequest_v7(ProduceRequest): - """ - V7 bumped up to indicate ZStandard capability. 
(see KIP-110) - """ - API_VERSION = 7 - RESPONSE_TYPE = ProduceResponse_v7 - SCHEMA = ProduceRequest_v6.SCHEMA - - -class ProduceRequest_v8(ProduceRequest): - """ - V8 bumped up to add two new fields record_errors offset list and error_message - to PartitionResponse (See KIP-467) - """ - API_VERSION = 8 - RESPONSE_TYPE = ProduceResponse_v8 - SCHEMA = ProduceRequest_v7.SCHEMA - - -ProduceRequest = [ - ProduceRequest_v0, ProduceRequest_v1, ProduceRequest_v2, - ProduceRequest_v3, ProduceRequest_v4, ProduceRequest_v5, - ProduceRequest_v6, ProduceRequest_v7, ProduceRequest_v8, -] -ProduceResponse = [ - ProduceResponse_v0, ProduceResponse_v1, ProduceResponse_v2, - ProduceResponse_v3, ProduceResponse_v4, ProduceResponse_v5, - ProduceResponse_v6, ProduceResponse_v7, ProduceResponse_v8, -] diff --git a/aiokafka/util.py b/aiokafka/util.py index 21de4b2b..5dab710c 100644 --- a/aiokafka/util.py +++ b/aiokafka/util.py @@ -1,10 +1,10 @@ import asyncio import os from asyncio import AbstractEventLoop -from distutils.version import StrictVersion -from typing import Awaitable, Dict, Tuple, TypeVar, Union +from typing import Awaitable, Dict, Tuple, TypeVar, Union, cast import async_timeout +from packaging.version import Version from .structs import OffsetAndMetadata, TopicPartition @@ -40,7 +40,11 @@ async def wait_for(fut: Awaitable[T], timeout: Union[None, int, float] = None) - def parse_kafka_version(api_version: str) -> Tuple[int, int, int]: - version = StrictVersion(api_version).version + parsed = Version(api_version).release + if not 2 <= len(parsed) <= 3: + raise ValueError(api_version) + version = cast(Tuple[int, int, int], (parsed + (0,))[:3]) + if not (0, 9) <= version < (3, 0): raise ValueError(api_version) return version diff --git a/docker/build.py b/docker/build.py index c95c6689..43c35a47 100644 --- a/docker/build.py +++ b/docker/build.py @@ -5,10 +5,10 @@ import argparse -async def build(versions_file, args, *, loop): +async def build(versions_file, args): with open(versions_file) as f: - config = yaml.load(f.read()) + config = yaml.load(f.read(), yaml.Loader) for action in args.actions: procs = [] diff --git a/docker/config.yml b/docker/config.yml index 4d742f6f..1a2b6b65 100644 --- a/docker/config.yml +++ b/docker/config.yml @@ -22,5 +22,20 @@ versions: kafka: "2.3.1" scala: "2.12" - - kafka: "2.4.0" + kafka: "2.4.1" scala: "2.12" + - + kafka: "2.5.1" + scala: "2.12" + - + kafka: "2.6.3" + scala: "2.12" + - + kafka: "2.7.2" + scala: "2.13" + - + kafka: "2.8.1" + scala: "2.13" + # - + # kafka: "3.0.0" + # scala: "2.13" diff --git a/docs/consumer.rst b/docs/consumer.rst index e2ab01b3..cebe662c 100644 --- a/docs/consumer.rst +++ b/docs/consumer.rst @@ -368,7 +368,7 @@ etc. See :meth:`~aiokafka.AIOKafkaConsumer.subscribe` docs for more details. class MyRebalancer(aiokafka.ConsumerRebalanceListener): async def on_partitions_revoked(self, revoked): - async with self.lock: + async with lock: pass async def on_partitions_assigned(self, assigned): @@ -377,7 +377,7 @@ etc. See :meth:`~aiokafka.AIOKafkaConsumer.subscribe` docs for more details. 
async def main(): consumer.subscribe("topic", listener=MyRebalancer()) while True: - async with self.lock: + async with lock: msgs = await consumer.getmany(timeout_ms=1000) # process messages diff --git a/pytest.ini b/pytest.ini index 6171b335..c3d76d9c 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,6 +1,10 @@ [pytest] filterwarnings = error + # https://github.com/docker/docker-py/issues/1293 ignore:.*docker.sock.*:ResourceWarning + ignore:distutils Version classes are deprecated:DeprecationWarning:docker + # Actually comes from docker importing distutils on Windows + ignore:the imp module is deprecated in favour of importlib:DeprecationWarning:pywintypes markers = ssl: Tests that require SSL certificates to run diff --git a/setup.cfg b/setup.cfg index d3c66bc0..6b23dfa0 100644 --- a/setup.cfg +++ b/setup.cfg @@ -26,7 +26,6 @@ strict_optional = True warn_redundant_casts = True warn_unused_ignores = True warn_unused_configs = True -mypy_path=stubs [mypy-pytest] ignore_missing_imports = True @@ -35,6 +34,7 @@ ignore_missing_imports = True ignore_missing_imports = True [mypy-kafka.*] +ignore_missing_imports = True warn_no_return = False disallow_untyped_defs = False diff --git a/setup.py b/setup.py index 95c80c20..a89f6caa 100644 --- a/setup.py +++ b/setup.py @@ -2,11 +2,11 @@ import platform import re import sys -from distutils.command.bdist_rpm import bdist_rpm as _bdist_rpm -from distutils.command.build_ext import build_ext -from distutils.errors import CCompilerError, DistutilsExecError, DistutilsPlatformError from setuptools import Extension, setup +from setuptools.command.bdist_rpm import bdist_rpm as _bdist_rpm +from setuptools.command.build_ext import build_ext +from setuptools.errors import CCompilerError, ExecError, PlatformError # Those are needed to build _hton for windows @@ -88,20 +88,21 @@ class ve_build_ext(build_ext): def run(self): try: build_ext.run(self) - except (DistutilsPlatformError, FileNotFoundError): + except (PlatformError, FileNotFoundError): raise BuildFailed() def build_extension(self, ext): try: build_ext.build_extension(self, ext) - except (CCompilerError, DistutilsExecError, DistutilsPlatformError, ValueError): + except (CCompilerError, ExecError, PlatformError, ValueError): raise BuildFailed() install_requires = [ "async-timeout", - "kafka-python>=2.0.0", + "kafka-python>=2.0.2", "dataclasses>=0.5; python_version<'3.7'", + "packaging", ] PY_VER = sys.version_info diff --git a/stubs/kafka/__init__.pyi b/stubs/kafka/__init__.pyi deleted file mode 100644 index 24e0b2d9..00000000 --- a/stubs/kafka/__init__.pyi +++ /dev/null @@ -1,11 +0,0 @@ -import logging -from kafka.admin import KafkaAdminClient as KafkaAdminClient -from kafka.client_async import KafkaClient as KafkaClient -from kafka.conn import BrokerConnection as BrokerConnection -from kafka.consumer import KafkaConsumer as KafkaConsumer -from kafka.consumer.subscription_state import ConsumerRebalanceListener as ConsumerRebalanceListener -from kafka.producer import KafkaProducer as KafkaProducer -from typing import Any - -class NullHandler(logging.Handler): - def emit(self, record: Any) -> None: ... 
diff --git a/stubs/kafka/admin/__init__.pyi b/stubs/kafka/admin/__init__.pyi deleted file mode 100644 index 8c1ee0d8..00000000 --- a/stubs/kafka/admin/__init__.pyi +++ /dev/null @@ -1,5 +0,0 @@ -from kafka.admin.acl_resource import ACL as ACL, ACLFilter as ACLFilter, ACLOperation as ACLOperation, ACLPermissionType as ACLPermissionType, ACLResourcePatternType as ACLResourcePatternType, ResourcePattern as ResourcePattern, ResourcePatternFilter as ResourcePatternFilter, ResourceType as ResourceType -from kafka.admin.client import KafkaAdminClient as KafkaAdminClient -from kafka.admin.config_resource import ConfigResource as ConfigResource, ConfigResourceType as ConfigResourceType -from kafka.admin.new_partitions import NewPartitions as NewPartitions -from kafka.admin.new_topic import NewTopic as NewTopic diff --git a/stubs/kafka/admin/acl_resource.pyi b/stubs/kafka/admin/acl_resource.pyi deleted file mode 100644 index c5f7827c..00000000 --- a/stubs/kafka/admin/acl_resource.pyi +++ /dev/null @@ -1,65 +0,0 @@ -from kafka.errors import IllegalArgumentError as IllegalArgumentError -from kafka.vendor.enum34 import IntEnum as IntEnum -from typing import Any - -class ResourceType(IntEnum): - UNKNOWN: Any = ... - ANY: Any = ... - CLUSTER: Any = ... - DELEGATION_TOKEN: Any = ... - GROUP: Any = ... - TOPIC: Any = ... - TRANSACTIONAL_ID: int = ... - -class ACLOperation(IntEnum): - ANY: Any = ... - ALL: Any = ... - READ: Any = ... - WRITE: Any = ... - CREATE: Any = ... - DELETE: Any = ... - ALTER: Any = ... - DESCRIBE: Any = ... - CLUSTER_ACTION: Any = ... - DESCRIBE_CONFIGS: Any = ... - ALTER_CONFIGS: Any = ... - IDEMPOTENT_WRITE: int = ... - -class ACLPermissionType(IntEnum): - ANY: Any = ... - DENY: Any = ... - ALLOW: int = ... - -class ACLResourcePatternType(IntEnum): - ANY: Any = ... - MATCH: Any = ... - LITERAL: Any = ... - PREFIXED: int = ... - -class ACLFilter: - principal: Any = ... - host: Any = ... - operation: Any = ... - permission_type: Any = ... - resource_pattern: Any = ... - def __init__(self, principal: Any, host: Any, operation: Any, permission_type: Any, resource_pattern: Any) -> None: ... - def validate(self) -> None: ... - def __eq__(self, other: Any) -> Any: ... - def __hash__(self) -> Any: ... - -class ACL(ACLFilter): - def __init__(self, principal: Any, host: Any, operation: Any, permission_type: Any, resource_pattern: Any) -> None: ... - def validate(self) -> None: ... - -class ResourcePatternFilter: - resource_type: Any = ... - resource_name: Any = ... - pattern_type: Any = ... - def __init__(self, resource_type: Any, resource_name: Any, pattern_type: Any) -> None: ... - def validate(self) -> None: ... - def __eq__(self, other: Any) -> Any: ... - def __hash__(self) -> Any: ... - -class ResourcePattern(ResourcePatternFilter): - def __init__(self, resource_type: Any, resource_name: Any, pattern_type: Any = ...) -> None: ... - def validate(self) -> None: ... diff --git a/stubs/kafka/admin/client.pyi b/stubs/kafka/admin/client.pyi deleted file mode 100644 index 75a46417..00000000 --- a/stubs/kafka/admin/client.pyi +++ /dev/null @@ -1,32 +0,0 @@ -from . 
import ConfigResourceType as ConfigResourceType -from kafka.admin.acl_resource import ACL as ACL, ACLFilter as ACLFilter, ACLOperation as ACLOperation, ACLPermissionType as ACLPermissionType, ACLResourcePatternType as ACLResourcePatternType, ResourcePattern as ResourcePattern, ResourceType as ResourceType -from kafka.client_async import KafkaClient as KafkaClient -from kafka.errors import IllegalArgumentError as IllegalArgumentError, IncompatibleBrokerVersion as IncompatibleBrokerVersion, KafkaConfigurationError as KafkaConfigurationError, NotControllerError as NotControllerError, UnrecognizedBrokerVersion as UnrecognizedBrokerVersion -from kafka.metrics import MetricConfig as MetricConfig, Metrics as Metrics -from kafka.protocol.admin import AlterConfigsRequest as AlterConfigsRequest, CreateAclsRequest as CreateAclsRequest, CreatePartitionsRequest as CreatePartitionsRequest, CreateTopicsRequest as CreateTopicsRequest, DeleteAclsRequest as DeleteAclsRequest, DeleteTopicsRequest as DeleteTopicsRequest, DescribeAclsRequest as DescribeAclsRequest, DescribeConfigsRequest as DescribeConfigsRequest, DescribeGroupsRequest as DescribeGroupsRequest, ListGroupsRequest as ListGroupsRequest -from kafka.protocol.commit import GroupCoordinatorRequest as GroupCoordinatorRequest, OffsetFetchRequest as OffsetFetchRequest -from kafka.protocol.metadata import MetadataRequest as MetadataRequest -from kafka.structs import OffsetAndMetadata as OffsetAndMetadata, TopicPartition as TopicPartition -from typing import Any, Optional - -log: Any - -class KafkaAdminClient: - DEFAULT_CONFIG: Any = ... - config: Any = ... - def __init__(self, **configs: Any) -> None: ... - def close(self) -> None: ... - def create_topics(self, new_topics: Any, timeout_ms: Optional[Any] = ..., validate_only: bool = ...): ... - def delete_topics(self, topics: Any, timeout_ms: Optional[Any] = ...): ... - def list_topics(self): ... - def describe_topics(self, topics: Optional[Any] = ...): ... - def describe_cluster(self): ... - def describe_acls(self, acl_filter: Any): ... - def create_acls(self, acls: Any): ... - def delete_acls(self, acl_filters: Any): ... - def describe_configs(self, config_resources: Any, include_synonyms: bool = ...): ... - def alter_configs(self, config_resources: Any): ... - def create_partitions(self, topic_partitions: Any, timeout_ms: Optional[Any] = ..., validate_only: bool = ...): ... - def describe_consumer_groups(self, group_ids: Any, group_coordinator_id: Optional[Any] = ..., include_authorized_operations: bool = ...): ... - def list_consumer_groups(self, broker_ids: Optional[Any] = ...): ... - def list_consumer_group_offsets(self, group_id: Any, group_coordinator_id: Optional[Any] = ..., partitions: Optional[Any] = ...): ... diff --git a/stubs/kafka/admin/config_resource.pyi b/stubs/kafka/admin/config_resource.pyi deleted file mode 100644 index 509f83ca..00000000 --- a/stubs/kafka/admin/config_resource.pyi +++ /dev/null @@ -1,12 +0,0 @@ -from kafka.vendor.enum34 import IntEnum as IntEnum -from typing import Any, Optional - -class ConfigResourceType(IntEnum): - BROKER: Any = ... - TOPIC: int = ... - -class ConfigResource: - resource_type: Any = ... - name: Any = ... - configs: Any = ... - def __init__(self, resource_type: Any, name: Any, configs: Optional[Any] = ...) -> None: ... 
diff --git a/stubs/kafka/admin/new_partitions.pyi b/stubs/kafka/admin/new_partitions.pyi deleted file mode 100644 index aeb31ad4..00000000 --- a/stubs/kafka/admin/new_partitions.pyi +++ /dev/null @@ -1,6 +0,0 @@ -from typing import Any, Optional - -class NewPartitions: - total_count: Any = ... - new_assignments: Any = ... - def __init__(self, total_count: Any, new_assignments: Optional[Any] = ...) -> None: ... diff --git a/stubs/kafka/admin/new_topic.pyi b/stubs/kafka/admin/new_topic.pyi deleted file mode 100644 index e9458c06..00000000 --- a/stubs/kafka/admin/new_topic.pyi +++ /dev/null @@ -1,10 +0,0 @@ -from kafka.errors import IllegalArgumentError as IllegalArgumentError -from typing import Any, Optional - -class NewTopic: - name: Any = ... - num_partitions: Any = ... - replication_factor: Any = ... - replica_assignments: Any = ... - topic_configs: Any = ... - def __init__(self, name: Any, num_partitions: Any, replication_factor: Any, replica_assignments: Optional[Any] = ..., topic_configs: Optional[Any] = ...) -> None: ... diff --git a/stubs/kafka/client_async.pyi b/stubs/kafka/client_async.pyi deleted file mode 100644 index 3875c939..00000000 --- a/stubs/kafka/client_async.pyi +++ /dev/null @@ -1,56 +0,0 @@ -from kafka.cluster import ClusterMetadata as ClusterMetadata -from kafka.conn import BrokerConnection as BrokerConnection, ConnectionStates as ConnectionStates, collect_hosts as collect_hosts, get_ip_port_afi as get_ip_port_afi -from kafka.future import Future as Future -from kafka.metrics import AnonMeasurable as AnonMeasurable -from kafka.metrics.stats import Avg as Avg, Count as Count, Rate as Rate -from kafka.metrics.stats.rate import TimeUnit as TimeUnit -from kafka.protocol.metadata import MetadataRequest as MetadataRequest -from kafka.vendor import socketpair as socketpair -from typing import Any, Optional - -log: Any - -class KafkaClient: - DEFAULT_CONFIG: Any = ... - config: Any = ... - cluster: Any = ... - def __init__(self, **configs: Any) -> None: ... - def maybe_connect(self, node_id: Any, wakeup: bool = ...): ... - def ready(self, node_id: Any, metadata_priority: bool = ...): ... - def connected(self, node_id: Any): ... - def close(self, node_id: Optional[Any] = ...) -> None: ... - def __del__(self) -> None: ... - def is_disconnected(self, node_id: Any): ... - def connection_delay(self, node_id: Any): ... - def is_ready(self, node_id: Any, metadata_priority: bool = ...): ... - def send(self, node_id: Any, request: Any, wakeup: bool = ...): ... - def poll(self, timeout_ms: Optional[Any] = ..., future: Optional[Any] = ...): ... - def in_flight_request_count(self, node_id: Optional[Any] = ...): ... - def least_loaded_node(self): ... - def set_topics(self, topics: Any): ... - def add_topic(self, topic: Any): ... - def get_api_versions(self): ... - def check_version(self, node_id: Optional[Any] = ..., timeout: int = ..., strict: bool = ...): ... - def wakeup(self) -> None: ... - def bootstrap_connected(self): ... - -class IdleConnectionManager: - connections_max_idle: Any = ... - next_idle_close_check_time: Any = ... - lru_connections: Any = ... - def __init__(self, connections_max_idle_ms: Any) -> None: ... - def update(self, conn_id: Any) -> None: ... - def remove(self, conn_id: Any) -> None: ... - def is_expired(self, conn_id: Any): ... - def next_check_ms(self): ... - def update_next_idle_close_check_time(self, ts: Any) -> None: ... - def poll_expired_connection(self): ... - -class KafkaClientMetrics: - metrics: Any = ... - metric_group_name: Any = ... 
- connection_closed: Any = ... - connection_created: Any = ... - select_time: Any = ... - io_time: Any = ... - def __init__(self, metrics: Any, metric_group_prefix: Any, conns: Any): ... diff --git a/stubs/kafka/cluster.pyi b/stubs/kafka/cluster.pyi deleted file mode 100644 index 5db9a199..00000000 --- a/stubs/kafka/cluster.pyi +++ /dev/null @@ -1,33 +0,0 @@ -from kafka.conn import collect_hosts as collect_hosts -from kafka.future import Future as Future -from kafka.structs import BrokerMetadata as BrokerMetadata, PartitionMetadata as PartitionMetadata, TopicPartition as TopicPartition -from typing import Any - -log: Any - -class ClusterMetadata: - DEFAULT_CONFIG: Any = ... - need_all_topic_metadata: bool = ... - unauthorized_topics: Any = ... - internal_topics: Any = ... - controller: Any = ... - config: Any = ... - def __init__(self, **configs: Any) -> None: ... - def is_bootstrap(self, node_id: Any): ... - def brokers(self): ... - def broker_metadata(self, broker_id: Any): ... - def partitions_for_topic(self, topic: Any): ... - def available_partitions_for_topic(self, topic: Any): ... - def leader_for_partition(self, partition: Any): ... - def partitions_for_broker(self, broker_id: Any): ... - def coordinator_for_group(self, group: Any): ... - def ttl(self): ... - def refresh_backoff(self): ... - def request_update(self): ... - def topics(self, exclude_internal_topics: bool = ...): ... - def failed_update(self, exception: Any) -> None: ... - def update_metadata(self, metadata: Any): ... - def add_listener(self, listener: Any) -> None: ... - def remove_listener(self, listener: Any) -> None: ... - def add_group_coordinator(self, group: Any, response: Any): ... - def with_partitions(self, partitions_to_add: Any): ... diff --git a/stubs/kafka/codec.pyi b/stubs/kafka/codec.pyi deleted file mode 100644 index 8129ae14..00000000 --- a/stubs/kafka/codec.pyi +++ /dev/null @@ -1,21 +0,0 @@ -from typing import Any, Optional - -PYPY: Any - -def has_gzip(): ... -def has_snappy(): ... -def has_lz4(): ... -def gzip_encode(payload: Any, compresslevel: Optional[Any] = ...): ... -def gzip_decode(payload: Any): ... -def snappy_encode(payload: Any, xerial_compatible: bool = ..., xerial_blocksize: Any = ...): ... -def snappy_decode(payload: Any): ... - -lz4_encode: Any - -def lz4f_decode(payload: Any): ... - -lz4_decode: Any -lz4_decode = lz4f_decode - -def lz4_encode_old_kafka(payload: Any): ... -def lz4_decode_old_kafka(payload: Any): ... diff --git a/stubs/kafka/conn.pyi b/stubs/kafka/conn.pyi deleted file mode 100644 index f0c867f4..00000000 --- a/stubs/kafka/conn.pyi +++ /dev/null @@ -1,74 +0,0 @@ -import ssl -from kafka.future import Future as Future -from kafka.metrics.stats import Avg as Avg, Count as Count, Max as Max, Rate as Rate -from kafka.oauth.abstract import AbstractTokenProvider as AbstractTokenProvider -from kafka.protocol.admin import SaslHandShakeRequest as SaslHandShakeRequest -from kafka.protocol.commit import OffsetFetchRequest as OffsetFetchRequest -from kafka.protocol.metadata import MetadataRequest as MetadataRequest -from kafka.protocol.parser import KafkaProtocol as KafkaProtocol -from kafka.protocol.types import Int32 as Int32, Int8 as Int8 -from kafka.scram import ScramClient as ScramClient -from typing import Any, Optional - -log: Any -DEFAULT_KAFKA_PORT: int -SASL_QOP_AUTH: int -SASL_QOP_AUTH_INT: int -SASL_QOP_AUTH_CONF: int -ssl_available: bool - -class SSLWantReadError(Exception): ... -class SSLWantWriteError(Exception): ... 
- -AFI_NAMES: Any - -class ConnectionStates: - DISCONNECTING: str = ... - DISCONNECTED: str = ... - CONNECTING: str = ... - HANDSHAKE: str = ... - CONNECTED: str = ... - AUTHENTICATING: str = ... - -class BrokerConnection: - DEFAULT_CONFIG: Any = ... - SECURITY_PROTOCOLS: Any = ... - SASL_MECHANISMS: Any = ... - host: Any = ... - port: Any = ... - afi: Any = ... - config: Any = ... - node_id: Any = ... - in_flight_requests: Any = ... - state: Any = ... - last_attempt: int = ... - def __init__(self, host: Any, port: Any, afi: Any, **configs: Any) -> None: ... - def connect_blocking(self, timeout: Any = ...): ... - def connect(self): ... - def blacked_out(self): ... - def connection_delay(self): ... - def connected(self): ... - def connecting(self): ... - def disconnected(self): ... - def __del__(self) -> None: ... - def close(self, error: Optional[Any] = ...) -> None: ... - def send(self, request: Any, blocking: bool = ...): ... - def send_pending_requests(self): ... - def send_pending_requests_v2(self): ... - def can_send_more(self): ... - def recv(self): ... - def requests_timed_out(self): ... - def get_api_versions(self): ... - def check_version(self, timeout: int = ..., strict: bool = ..., topics: Any = ...): ... - -class BrokerConnectionMetrics: - metrics: Any = ... - bytes_sent: Any = ... - bytes_received: Any = ... - request_time: Any = ... - def __init__(self, metrics: Any, metric_group_prefix: Any, node_id: Any) -> None: ... - -def get_ip_port_afi(host_and_port_str: Any): ... -def collect_hosts(hosts: Any, randomize: bool = ...): ... -def is_inet_4_or_6(gai: Any): ... -def dns_lookup(host: Any, port: Any, afi: Any = ...): ... diff --git a/stubs/kafka/consumer/__init__.pyi b/stubs/kafka/consumer/__init__.pyi deleted file mode 100644 index ff36d4d8..00000000 --- a/stubs/kafka/consumer/__init__.pyi +++ /dev/null @@ -1 +0,0 @@ -from kafka.consumer.group import KafkaConsumer as KafkaConsumer diff --git a/stubs/kafka/consumer/fetcher.pyi b/stubs/kafka/consumer/fetcher.pyi deleted file mode 100644 index 63d638b0..00000000 --- a/stubs/kafka/consumer/fetcher.pyi +++ /dev/null @@ -1,66 +0,0 @@ -import kafka.errors as Errors -import six -from collections import namedtuple -from kafka.future import Future as Future -from kafka.metrics.stats import Avg as Avg, Count as Count, Max as Max, Rate as Rate -from kafka.protocol.fetch import FetchRequest as FetchRequest -from kafka.protocol.offset import OffsetRequest as OffsetRequest, OffsetResetStrategy as OffsetResetStrategy, UNKNOWN_OFFSET as UNKNOWN_OFFSET -from kafka.record import MemoryRecords as MemoryRecords -from kafka.serializer import Deserializer as Deserializer -from kafka.structs import OffsetAndTimestamp as OffsetAndTimestamp, TopicPartition as TopicPartition -from typing import Any, Optional - -log: Any -READ_UNCOMMITTED: int -READ_COMMITTED: int - -ConsumerRecord = namedtuple('ConsumerRecord', ['topic', 'partition', 'offset', 'timestamp', 'timestamp_type', 'key', 'value', 'headers', 'checksum', 'serialized_key_size', 'serialized_value_size', 'serialized_header_size']) - -CompletedFetch = namedtuple('CompletedFetch', ['topic_partition', 'fetched_offset', 'response_version', 'partition_data', 'metric_aggregator']) - -class NoOffsetForPartitionError(Errors.KafkaError): ... -class RecordTooLargeError(Errors.KafkaError): ... - -class Fetcher(six.Iterator): - DEFAULT_CONFIG: Any = ... - config: Any = ... - def __init__(self, client: Any, subscriptions: Any, metrics: Any, **configs: Any) -> None: ... - def send_fetches(self): ... 
- def reset_offsets_if_needed(self, partitions: Any) -> None: ... - def in_flight_fetches(self): ... - def update_fetch_positions(self, partitions: Any) -> None: ... - def get_offsets_by_times(self, timestamps: Any, timeout_ms: Any): ... - def beginning_offsets(self, partitions: Any, timeout_ms: Any): ... - def end_offsets(self, partitions: Any, timeout_ms: Any): ... - def beginning_or_end_offset(self, partitions: Any, timestamp: Any, timeout_ms: Any): ... - def fetched_records(self, max_records: Optional[Any] = ..., update_offsets: bool = ...): ... - def __iter__(self) -> Any: ... - def __next__(self): ... - class PartitionRecords: - fetch_offset: Any = ... - topic_partition: Any = ... - messages: Any = ... - message_idx: Any = ... - def __init__(self, fetch_offset: Any, tp: Any, messages: Any) -> None: ... - def __len__(self): ... - def discard(self) -> None: ... - def take(self, n: Optional[Any] = ...): ... - -class FetchResponseMetricAggregator: - sensors: Any = ... - unrecorded_partitions: Any = ... - total_bytes: int = ... - total_records: int = ... - def __init__(self, sensors: Any, partitions: Any) -> None: ... - def record(self, partition: Any, num_bytes: Any, num_records: Any) -> None: ... - -class FetchManagerMetrics: - metrics: Any = ... - group_name: Any = ... - bytes_fetched: Any = ... - records_fetched: Any = ... - fetch_latency: Any = ... - records_fetch_lag: Any = ... - fetch_throttle_time_sensor: Any = ... - def __init__(self, metrics: Any, prefix: Any) -> None: ... - def record_topic_fetch_metrics(self, topic: Any, num_bytes: Any, num_records: Any) -> None: ... diff --git a/stubs/kafka/consumer/group.pyi b/stubs/kafka/consumer/group.pyi deleted file mode 100644 index d70e8732..00000000 --- a/stubs/kafka/consumer/group.pyi +++ /dev/null @@ -1,49 +0,0 @@ -import six -from kafka.client_async import KafkaClient as KafkaClient -from kafka.consumer.fetcher import Fetcher as Fetcher -from kafka.consumer.subscription_state import SubscriptionState as SubscriptionState -from kafka.coordinator.assignors.range import RangePartitionAssignor as RangePartitionAssignor -from kafka.coordinator.assignors.roundrobin import RoundRobinPartitionAssignor as RoundRobinPartitionAssignor -from kafka.coordinator.consumer import ConsumerCoordinator as ConsumerCoordinator -from kafka.errors import KafkaConfigurationError as KafkaConfigurationError, UnsupportedVersionError as UnsupportedVersionError -from kafka.metrics import MetricConfig as MetricConfig, Metrics as Metrics -from kafka.protocol.offset import OffsetResetStrategy as OffsetResetStrategy -from kafka.structs import TopicPartition as TopicPartition -from typing import Any, Optional - -log: Any - -class KafkaConsumer(six.Iterator): - DEFAULT_CONFIG: Any = ... - DEFAULT_SESSION_TIMEOUT_MS_0_9: int = ... - config: Any = ... - def __init__(self, *topics: Any, **configs: Any) -> None: ... - def bootstrap_connected(self): ... - def assign(self, partitions: Any) -> None: ... - def assignment(self): ... - def close(self, autocommit: bool = ...) -> None: ... - def commit_async(self, offsets: Optional[Any] = ..., callback: Optional[Any] = ...): ... - def commit(self, offsets: Optional[Any] = ...) -> None: ... - def committed(self, partition: Any, metadata: bool = ...): ... - def topics(self): ... - def partitions_for_topic(self, topic: Any): ... - def poll(self, timeout_ms: int = ..., max_records: Optional[Any] = ..., update_offsets: bool = ...): ... - def position(self, partition: Any): ... - def highwater(self, partition: Any): ... 
- def pause(self, *partitions: Any) -> None: ... - def paused(self): ... - def resume(self, *partitions: Any) -> None: ... - def seek(self, partition: Any, offset: Any) -> None: ... - def seek_to_beginning(self, *partitions: Any) -> None: ... - def seek_to_end(self, *partitions: Any) -> None: ... - def subscribe(self, topics: Any = ..., pattern: Optional[Any] = ..., listener: Optional[Any] = ...) -> None: ... - def subscription(self): ... - def unsubscribe(self) -> None: ... - def metrics(self, raw: bool = ...): ... - def offsets_for_times(self, timestamps: Any): ... - def beginning_offsets(self, partitions: Any): ... - def end_offsets(self, partitions: Any): ... - def __iter__(self) -> Any: ... - def __next__(self): ... - def next_v2(self): ... - def next_v1(self): ... diff --git a/stubs/kafka/consumer/subscription_state.pyi b/stubs/kafka/consumer/subscription_state.pyi deleted file mode 100644 index 189fa230..00000000 --- a/stubs/kafka/consumer/subscription_state.pyi +++ /dev/null @@ -1,63 +0,0 @@ -import abc -from kafka.errors import IllegalStateError as IllegalStateError -from kafka.protocol.offset import OffsetResetStrategy as OffsetResetStrategy -from kafka.structs import OffsetAndMetadata as OffsetAndMetadata -from typing import Any, Optional - -log: Any - -class SubscriptionState: - subscription: Any = ... - subscribed_pattern: Any = ... - assignment: Any = ... - listener: Any = ... - needs_fetch_committed_offsets: bool = ... - def __init__(self, offset_reset_strategy: str = ...) -> None: ... - def subscribe(self, topics: Any = ..., pattern: Optional[Any] = ..., listener: Optional[Any] = ...) -> None: ... - def change_subscription(self, topics: Any) -> None: ... - def group_subscribe(self, topics: Any) -> None: ... - def reset_group_subscription(self) -> None: ... - def assign_from_user(self, partitions: Any) -> None: ... - def assign_from_subscribed(self, assignments: Any) -> None: ... - def unsubscribe(self) -> None: ... - def group_subscription(self): ... - def seek(self, partition: Any, offset: Any) -> None: ... - def assigned_partitions(self): ... - def paused_partitions(self): ... - def fetchable_partitions(self): ... - def partitions_auto_assigned(self): ... - def all_consumed_offsets(self): ... - def need_offset_reset(self, partition: Any, offset_reset_strategy: Optional[Any] = ...) -> None: ... - def has_default_offset_reset_policy(self): ... - def is_offset_reset_needed(self, partition: Any): ... - def has_all_fetch_positions(self): ... - def missing_fetch_positions(self): ... - def is_assigned(self, partition: Any): ... - def is_paused(self, partition: Any): ... - def is_fetchable(self, partition: Any): ... - def pause(self, partition: Any) -> None: ... - def resume(self, partition: Any) -> None: ... - -class TopicPartitionState: - committed: Any = ... - has_valid_position: bool = ... - paused: bool = ... - awaiting_reset: bool = ... - reset_strategy: Any = ... - highwater: Any = ... - drop_pending_message_set: bool = ... - last_offset_from_message_batch: Any = ... - def __init__(self) -> None: ... - position: Any = ... - def await_reset(self, strategy: Any) -> None: ... - def seek(self, offset: Any) -> None: ... - def pause(self) -> None: ... - def resume(self) -> None: ... - def is_fetchable(self): ... - -class ConsumerRebalanceListener(metaclass=abc.ABCMeta): - __metaclass__: Any = ... - @abc.abstractmethod - def on_partitions_revoked(self, revoked: Any) -> Any: ... - @abc.abstractmethod - def on_partitions_assigned(self, assigned: Any) -> Any: ... 
diff --git a/stubs/kafka/coordinator/__init__.pyi b/stubs/kafka/coordinator/__init__.pyi deleted file mode 100644 index e69de29b..00000000 diff --git a/stubs/kafka/coordinator/assignors/__init__.pyi b/stubs/kafka/coordinator/assignors/__init__.pyi deleted file mode 100644 index e69de29b..00000000 diff --git a/stubs/kafka/coordinator/assignors/abstract.pyi b/stubs/kafka/coordinator/assignors/abstract.pyi deleted file mode 100644 index 00a5b377..00000000 --- a/stubs/kafka/coordinator/assignors/abstract.pyi +++ /dev/null @@ -1,15 +0,0 @@ -import abc -from typing import Any - -log: Any - -class AbstractPartitionAssignor(metaclass=abc.ABCMeta): - @property - @abc.abstractmethod - def name(self) -> Any: ... - @abc.abstractmethod - def assign(self, cluster: Any, members: Any) -> Any: ... - @abc.abstractmethod - def metadata(self, topics: Any) -> Any: ... - @abc.abstractmethod - def on_assignment(self, assignment: Any) -> Any: ... diff --git a/stubs/kafka/coordinator/assignors/range.pyi b/stubs/kafka/coordinator/assignors/range.pyi deleted file mode 100644 index 57e35cab..00000000 --- a/stubs/kafka/coordinator/assignors/range.pyi +++ /dev/null @@ -1,15 +0,0 @@ -from kafka.coordinator.assignors.abstract import AbstractPartitionAssignor as AbstractPartitionAssignor -from kafka.coordinator.protocol import ConsumerProtocolMemberAssignment as ConsumerProtocolMemberAssignment, ConsumerProtocolMemberMetadata as ConsumerProtocolMemberMetadata -from typing import Any - -log: Any - -class RangePartitionAssignor(AbstractPartitionAssignor): - name: str = ... - version: int = ... - @classmethod - def assign(cls, cluster: Any, member_metadata: Any): ... - @classmethod - def metadata(cls, topics: Any): ... - @classmethod - def on_assignment(cls, assignment: Any) -> None: ... diff --git a/stubs/kafka/coordinator/assignors/roundrobin.pyi b/stubs/kafka/coordinator/assignors/roundrobin.pyi deleted file mode 100644 index fdca1cfd..00000000 --- a/stubs/kafka/coordinator/assignors/roundrobin.pyi +++ /dev/null @@ -1,16 +0,0 @@ -from kafka.coordinator.assignors.abstract import AbstractPartitionAssignor as AbstractPartitionAssignor -from kafka.coordinator.protocol import ConsumerProtocolMemberAssignment as ConsumerProtocolMemberAssignment, ConsumerProtocolMemberMetadata as ConsumerProtocolMemberMetadata -from kafka.structs import TopicPartition as TopicPartition -from typing import Any - -log: Any - -class RoundRobinPartitionAssignor(AbstractPartitionAssignor): - name: str = ... - version: int = ... - @classmethod - def assign(cls, cluster: Any, member_metadata: Any): ... - @classmethod - def metadata(cls, topics: Any): ... - @classmethod - def on_assignment(cls, assignment: Any) -> None: ... 
diff --git a/stubs/kafka/coordinator/base.pyi b/stubs/kafka/coordinator/base.pyi deleted file mode 100644 index 5eda2358..00000000 --- a/stubs/kafka/coordinator/base.pyi +++ /dev/null @@ -1,79 +0,0 @@ -import abc -import threading -from kafka import errors as Errors -from kafka.coordinator.heartbeat import Heartbeat as Heartbeat -from kafka.future import Future as Future -from kafka.metrics import AnonMeasurable as AnonMeasurable -from kafka.metrics.stats import Avg as Avg, Count as Count, Max as Max, Rate as Rate -from kafka.protocol.commit import GroupCoordinatorRequest as GroupCoordinatorRequest, OffsetCommitRequest as OffsetCommitRequest -from kafka.protocol.group import HeartbeatRequest as HeartbeatRequest, JoinGroupRequest as JoinGroupRequest, LeaveGroupRequest as LeaveGroupRequest, SyncGroupRequest as SyncGroupRequest -from typing import Any, Optional - -log: Any - -class MemberState: - UNJOINED: str = ... - REBALANCING: str = ... - STABLE: str = ... - -class Generation: - generation_id: Any = ... - member_id: Any = ... - protocol: Any = ... - def __init__(self, generation_id: Any, member_id: Any, protocol: Any) -> None: ... - -class UnjoinedGroupException(Errors.KafkaError): - retriable: bool = ... - -class BaseCoordinator(metaclass=abc.ABCMeta): - DEFAULT_CONFIG: Any = ... - config: Any = ... - group_id: Any = ... - heartbeat: Any = ... - rejoin_needed: bool = ... - rejoining: bool = ... - state: Any = ... - join_future: Any = ... - coordinator_id: Any = ... - sensors: Any = ... - def __init__(self, client: Any, metrics: Any, **configs: Any) -> None: ... - @abc.abstractmethod - def protocol_type(self) -> Any: ... - @abc.abstractmethod - def group_protocols(self) -> Any: ... - def coordinator_unknown(self): ... - def coordinator(self): ... - def ensure_coordinator_ready(self) -> None: ... - def lookup_coordinator(self): ... - def need_rejoin(self): ... - def poll_heartbeat(self) -> None: ... - def time_to_next_heartbeat(self): ... - def ensure_active_group(self) -> None: ... - def coordinator_dead(self, error: Any) -> None: ... - def generation(self): ... - def reset_generation(self) -> None: ... - def request_rejoin(self) -> None: ... - def __del__(self) -> None: ... - def close(self) -> None: ... - def maybe_leave_group(self) -> None: ... - -class GroupCoordinatorMetrics: - heartbeat: Any = ... - metrics: Any = ... - metric_group_name: Any = ... - heartbeat_latency: Any = ... - join_latency: Any = ... - sync_latency: Any = ... - def __init__(self, heartbeat: Any, metrics: Any, prefix: Any, tags: Optional[Any] = ...): ... - -class HeartbeatThread(threading.Thread): - name: Any = ... - coordinator: Any = ... - enabled: bool = ... - closed: bool = ... - failed: Any = ... - def __init__(self, coordinator: Any) -> None: ... - def enable(self) -> None: ... - def disable(self) -> None: ... - def close(self) -> None: ... - def run(self) -> None: ... 
diff --git a/stubs/kafka/coordinator/consumer.pyi b/stubs/kafka/coordinator/consumer.pyi deleted file mode 100644 index a3b08f74..00000000 --- a/stubs/kafka/coordinator/consumer.pyi +++ /dev/null @@ -1,38 +0,0 @@ -from kafka.coordinator.assignors.range import RangePartitionAssignor as RangePartitionAssignor -from kafka.coordinator.assignors.roundrobin import RoundRobinPartitionAssignor as RoundRobinPartitionAssignor -from kafka.coordinator.base import BaseCoordinator as BaseCoordinator, Generation as Generation -from kafka.coordinator.protocol import ConsumerProtocol as ConsumerProtocol -from kafka.future import Future as Future -from kafka.metrics import AnonMeasurable as AnonMeasurable -from kafka.metrics.stats import Avg as Avg, Count as Count, Max as Max, Rate as Rate -from kafka.protocol.commit import OffsetCommitRequest as OffsetCommitRequest, OffsetFetchRequest as OffsetFetchRequest -from kafka.structs import OffsetAndMetadata as OffsetAndMetadata, TopicPartition as TopicPartition -from typing import Any, Optional - -log: Any - -class ConsumerCoordinator(BaseCoordinator): - DEFAULT_CONFIG: Any = ... - config: Any = ... - auto_commit_interval: Any = ... - next_auto_commit_deadline: Any = ... - completed_offset_commits: Any = ... - consumer_sensors: Any = ... - def __init__(self, client: Any, subscription: Any, metrics: Any, **configs: Any) -> None: ... - def __del__(self) -> None: ... - def protocol_type(self): ... - def group_protocols(self): ... - def poll(self) -> None: ... - def time_to_next_poll(self): ... - def need_rejoin(self): ... - def refresh_committed_offsets_if_needed(self) -> None: ... - def fetch_committed_offsets(self, partitions: Any): ... - def close(self, autocommit: bool = ...) -> None: ... - def commit_offsets_async(self, offsets: Any, callback: Optional[Any] = ...): ... - def commit_offsets_sync(self, offsets: Any): ... - -class ConsumerCoordinatorMetrics: - metrics: Any = ... - metric_group_name: Any = ... - commit_latency: Any = ... - def __init__(self, metrics: Any, metric_group_prefix: Any, subscription: Any): ... diff --git a/stubs/kafka/coordinator/heartbeat.pyi b/stubs/kafka/coordinator/heartbeat.pyi deleted file mode 100644 index b27c7410..00000000 --- a/stubs/kafka/coordinator/heartbeat.pyi +++ /dev/null @@ -1,20 +0,0 @@ -from typing import Any - -class Heartbeat: - DEFAULT_CONFIG: Any = ... - config: Any = ... - last_send: Any = ... - last_receive: Any = ... - last_poll: Any = ... - last_reset: Any = ... - heartbeat_failed: Any = ... - def __init__(self, **configs: Any) -> None: ... - def poll(self) -> None: ... - def sent_heartbeat(self) -> None: ... - def fail_heartbeat(self) -> None: ... - def received_heartbeat(self) -> None: ... - def time_to_next_heartbeat(self): ... - def should_heartbeat(self): ... - def session_timeout_expired(self): ... - def reset_timeouts(self) -> None: ... - def poll_timeout_expired(self): ... diff --git a/stubs/kafka/coordinator/protocol.pyi b/stubs/kafka/coordinator/protocol.pyi deleted file mode 100644 index 50f7c899..00000000 --- a/stubs/kafka/coordinator/protocol.pyi +++ /dev/null @@ -1,17 +0,0 @@ -from kafka.protocol.struct import Struct as Struct -from kafka.protocol.types import Array as Array, Bytes as Bytes, Int16 as Int16, Int32 as Int32, Schema as Schema, String as String -from kafka.structs import TopicPartition as TopicPartition -from typing import Any - -class ConsumerProtocolMemberMetadata(Struct): - SCHEMA: Any = ... - -class ConsumerProtocolMemberAssignment(Struct): - SCHEMA: Any = ... 
- def partitions(self): ... - -class ConsumerProtocol: - PROTOCOL_TYPE: str = ... - ASSIGNMENT_STRATEGIES: Any = ... - METADATA: Any = ... - ASSIGNMENT: Any = ... diff --git a/stubs/kafka/errors.pyi b/stubs/kafka/errors.pyi deleted file mode 100644 index 611dd6c6..00000000 --- a/stubs/kafka/errors.pyi +++ /dev/null @@ -1,327 +0,0 @@ -from typing import Any - -class KafkaError(RuntimeError): - retriable: bool = ... - invalid_metadata: bool = ... - -class IllegalStateError(KafkaError): ... -class IllegalArgumentError(KafkaError): ... - -class NoBrokersAvailable(KafkaError): - retriable: bool = ... - invalid_metadata: bool = ... - -class NodeNotReadyError(KafkaError): - retriable: bool = ... - -class KafkaProtocolError(KafkaError): - retriable: bool = ... - -class CorrelationIdError(KafkaProtocolError): - retriable: bool = ... - -class Cancelled(KafkaError): - retriable: bool = ... - -class TooManyInFlightRequests(KafkaError): - retriable: bool = ... - -class StaleMetadata(KafkaError): - retriable: bool = ... - invalid_metadata: bool = ... - -class MetadataEmptyBrokerList(KafkaError): - retriable: bool = ... - -class UnrecognizedBrokerVersion(KafkaError): ... -class IncompatibleBrokerVersion(KafkaError): ... - -class CommitFailedError(KafkaError): - def __init__(self, *args: Any, **kwargs: Any) -> None: ... - -class AuthenticationMethodNotSupported(KafkaError): ... - -class AuthenticationFailedError(KafkaError): - retriable: bool = ... - -class BrokerResponseError(KafkaError): - errno: Any = ... - message: Any = ... - description: Any = ... - -class NoError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class UnknownError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class OffsetOutOfRangeError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class CorruptRecordException(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... -InvalidMessageError = CorruptRecordException - -class UnknownTopicOrPartitionError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - retriable: bool = ... - invalid_metadata: bool = ... - -class InvalidFetchRequestError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class LeaderNotAvailableError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - retriable: bool = ... - invalid_metadata: bool = ... - -class NotLeaderForPartitionError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - retriable: bool = ... - invalid_metadata: bool = ... - -class RequestTimedOutError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - retriable: bool = ... - -class BrokerNotAvailableError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class ReplicaNotAvailableError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class MessageSizeTooLargeError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class StaleControllerEpochError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class OffsetMetadataTooLargeError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class StaleLeaderEpochCodeError(BrokerResponseError): - errno: int = ... 
- message: str = ... - -class GroupLoadInProgressError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - retriable: bool = ... - -class GroupCoordinatorNotAvailableError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - retriable: bool = ... - -class NotCoordinatorForGroupError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - retriable: bool = ... - -class InvalidTopicError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class RecordListTooLargeError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class NotEnoughReplicasError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - retriable: bool = ... - -class NotEnoughReplicasAfterAppendError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - retriable: bool = ... - -class InvalidRequiredAcksError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class IllegalGenerationError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class InconsistentGroupProtocolError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class InvalidGroupIdError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class UnknownMemberIdError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class InvalidSessionTimeoutError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class RebalanceInProgressError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class InvalidCommitOffsetSizeError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class TopicAuthorizationFailedError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class GroupAuthorizationFailedError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class ClusterAuthorizationFailedError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class InvalidTimestampError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class UnsupportedSaslMechanismError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class IllegalSaslStateError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class UnsupportedVersionError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class TopicAlreadyExistsError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class InvalidPartitionsError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class InvalidReplicationFactorError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class InvalidReplicationAssignmentError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class InvalidConfigurationError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class NotControllerError(BrokerResponseError): - errno: int = ... - message: str = ... 
- description: str = ... - retriable: bool = ... - -class InvalidRequestError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class UnsupportedForMessageFormatError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class PolicyViolationError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class SecurityDisabledError(BrokerResponseError): - errno: int = ... - message: str = ... - description: str = ... - -class KafkaUnavailableError(KafkaError): ... -class KafkaTimeoutError(KafkaError): ... - -class FailedPayloadsError(KafkaError): - payload: Any = ... - def __init__(self, payload: Any, *args: Any) -> None: ... - -class KafkaConnectionError(KafkaError): - retriable: bool = ... - invalid_metadata: bool = ... - -class ProtocolError(KafkaError): ... -class UnsupportedCodecError(KafkaError): ... -class KafkaConfigurationError(KafkaError): ... -class QuotaViolationError(KafkaError): ... - -class AsyncProducerQueueFull(KafkaError): - failed_msgs: Any = ... - def __init__(self, failed_msgs: Any, *args: Any) -> None: ... - -kafka_errors: Any - -def for_code(error_code: Any): ... -def check_error(response: Any) -> None: ... - -RETRY_BACKOFF_ERROR_TYPES: Any -RETRY_REFRESH_ERROR_TYPES: Any -RETRY_ERROR_TYPES: Any diff --git a/stubs/kafka/future.pyi b/stubs/kafka/future.pyi deleted file mode 100644 index 09b99d5d..00000000 --- a/stubs/kafka/future.pyi +++ /dev/null @@ -1,19 +0,0 @@ -from typing import Any - -log: Any - -class Future: - error_on_callbacks: bool = ... - is_done: bool = ... - value: Any = ... - exception: Any = ... - def __init__(self) -> None: ... - def succeeded(self): ... - def failed(self): ... - def retriable(self): ... - def success(self, value: Any): ... - def failure(self, e: Any): ... - def add_callback(self, f: Any, *args: Any, **kwargs: Any): ... - def add_errback(self, f: Any, *args: Any, **kwargs: Any): ... - def add_both(self, f: Any, *args: Any, **kwargs: Any): ... - def chain(self, future: Any): ... diff --git a/stubs/kafka/metrics/__init__.pyi b/stubs/kafka/metrics/__init__.pyi deleted file mode 100644 index 68c2f588..00000000 --- a/stubs/kafka/metrics/__init__.pyi +++ /dev/null @@ -1,8 +0,0 @@ -from kafka.metrics.compound_stat import NamedMeasurable as NamedMeasurable -from kafka.metrics.dict_reporter import DictReporter as DictReporter -from kafka.metrics.kafka_metric import KafkaMetric as KafkaMetric -from kafka.metrics.measurable import AnonMeasurable as AnonMeasurable -from kafka.metrics.metric_config import MetricConfig as MetricConfig -from kafka.metrics.metric_name import MetricName as MetricName -from kafka.metrics.metrics import Metrics as Metrics -from kafka.metrics.quota import Quota as Quota diff --git a/stubs/kafka/metrics/compound_stat.pyi b/stubs/kafka/metrics/compound_stat.pyi deleted file mode 100644 index 521ab419..00000000 --- a/stubs/kafka/metrics/compound_stat.pyi +++ /dev/null @@ -1,14 +0,0 @@ -import abc -from kafka.metrics.stat import AbstractStat as AbstractStat -from typing import Any - -class AbstractCompoundStat(AbstractStat, metaclass=abc.ABCMeta): - __metaclass__: Any = ... - def stats(self) -> None: ... - -class NamedMeasurable: - def __init__(self, metric_name: Any, measurable_stat: Any) -> None: ... - @property - def name(self): ... - @property - def stat(self): ... 
diff --git a/stubs/kafka/metrics/dict_reporter.pyi b/stubs/kafka/metrics/dict_reporter.pyi deleted file mode 100644 index 7d33d0b6..00000000 --- a/stubs/kafka/metrics/dict_reporter.pyi +++ /dev/null @@ -1,14 +0,0 @@ -from kafka.metrics.metrics_reporter import AbstractMetricsReporter as AbstractMetricsReporter -from typing import Any - -logger: Any - -class DictReporter(AbstractMetricsReporter): - def __init__(self, prefix: str = ...) -> None: ... - def snapshot(self): ... - def init(self, metrics: Any) -> None: ... - def metric_change(self, metric: Any) -> None: ... - def metric_removal(self, metric: Any): ... - def get_category(self, metric: Any): ... - def configure(self, configs: Any) -> None: ... - def close(self) -> None: ... diff --git a/stubs/kafka/metrics/kafka_metric.pyi b/stubs/kafka/metrics/kafka_metric.pyi deleted file mode 100644 index 63345c00..00000000 --- a/stubs/kafka/metrics/kafka_metric.pyi +++ /dev/null @@ -1,13 +0,0 @@ -from typing import Any, Optional - -class KafkaMetric: - def __init__(self, metric_name: Any, measurable: Any, config: Any) -> None: ... - @property - def metric_name(self): ... - @property - def measurable(self): ... - @property - def config(self): ... - @config.setter - def config(self, config: Any) -> None: ... - def value(self, time_ms: Optional[Any] = ...): ... diff --git a/stubs/kafka/metrics/measurable.pyi b/stubs/kafka/metrics/measurable.pyi deleted file mode 100644 index c029a178..00000000 --- a/stubs/kafka/metrics/measurable.pyi +++ /dev/null @@ -1,10 +0,0 @@ -import abc -from typing import Any - -class AbstractMeasurable(metaclass=abc.ABCMeta): - @abc.abstractmethod - def measure(self, config: Any, now: Any) -> Any: ... - -class AnonMeasurable(AbstractMeasurable): - def __init__(self, measure_fn: Any) -> None: ... - def measure(self, config: Any, now: Any): ... diff --git a/stubs/kafka/metrics/measurable_stat.pyi b/stubs/kafka/metrics/measurable_stat.pyi deleted file mode 100644 index e88729fb..00000000 --- a/stubs/kafka/metrics/measurable_stat.pyi +++ /dev/null @@ -1,7 +0,0 @@ -import abc -from kafka.metrics.measurable import AbstractMeasurable as AbstractMeasurable -from kafka.metrics.stat import AbstractStat as AbstractStat -from typing import Any - -class AbstractMeasurableStat(AbstractStat, AbstractMeasurable, metaclass=abc.ABCMeta): - __metaclass__: Any = ... diff --git a/stubs/kafka/metrics/metric_config.pyi b/stubs/kafka/metrics/metric_config.pyi deleted file mode 100644 index a996e039..00000000 --- a/stubs/kafka/metrics/metric_config.pyi +++ /dev/null @@ -1,12 +0,0 @@ -from typing import Any, Optional - -class MetricConfig: - quota: Any = ... - event_window: Any = ... - time_window_ms: Any = ... - tags: Any = ... - def __init__(self, quota: Optional[Any] = ..., samples: int = ..., event_window: Any = ..., time_window_ms: Any = ..., tags: Optional[Any] = ...) -> None: ... - @property - def samples(self): ... - @samples.setter - def samples(self, value: Any) -> None: ... diff --git a/stubs/kafka/metrics/metric_name.pyi b/stubs/kafka/metrics/metric_name.pyi deleted file mode 100644 index 224c7423..00000000 --- a/stubs/kafka/metrics/metric_name.pyi +++ /dev/null @@ -1,15 +0,0 @@ -from typing import Any, Optional - -class MetricName: - def __init__(self, name: Any, group: Any, description: Optional[Any] = ..., tags: Optional[Any] = ...) -> None: ... - @property - def name(self): ... - @property - def group(self): ... - @property - def description(self): ... - @property - def tags(self): ... - def __hash__(self) -> Any: ... 
- def __eq__(self, other: Any) -> Any: ... - def __ne__(self, other: Any) -> Any: ... diff --git a/stubs/kafka/metrics/metrics.pyi b/stubs/kafka/metrics/metrics.pyi deleted file mode 100644 index c23fcc03..00000000 --- a/stubs/kafka/metrics/metrics.pyi +++ /dev/null @@ -1,23 +0,0 @@ -from kafka.metrics import AnonMeasurable as AnonMeasurable, KafkaMetric as KafkaMetric, MetricConfig as MetricConfig, MetricName as MetricName -from typing import Any, Optional - -logger: Any - -class Metrics: - def __init__(self, default_config: Optional[Any] = ..., reporters: Optional[Any] = ..., enable_expiration: bool = ...): ... - @property - def config(self): ... - @property - def metrics(self): ... - def metric_name(self, name: Any, group: Any, description: str = ..., tags: Optional[Any] = ...): ... - def get_sensor(self, name: Any): ... - def sensor(self, name: Any, config: Optional[Any] = ..., inactive_sensor_expiration_time_seconds: Any = ..., parents: Optional[Any] = ...): ... - def remove_sensor(self, name: Any) -> None: ... - def add_metric(self, metric_name: Any, measurable: Any, config: Optional[Any] = ...) -> None: ... - def remove_metric(self, metric_name: Any): ... - def add_reporter(self, reporter: Any) -> None: ... - def register_metric(self, metric: Any) -> None: ... - class ExpireSensorTask: - @staticmethod - def run(metrics: Any) -> None: ... - def close(self) -> None: ... diff --git a/stubs/kafka/metrics/metrics_reporter.pyi b/stubs/kafka/metrics/metrics_reporter.pyi deleted file mode 100644 index 804d7fda..00000000 --- a/stubs/kafka/metrics/metrics_reporter.pyi +++ /dev/null @@ -1,15 +0,0 @@ -import abc -from typing import Any - -class AbstractMetricsReporter(metaclass=abc.ABCMeta): - __metaclass__: Any = ... - @abc.abstractmethod - def init(self, metrics: Any) -> Any: ... - @abc.abstractmethod - def metric_change(self, metric: Any) -> Any: ... - @abc.abstractmethod - def metric_removal(self, metric: Any) -> Any: ... - @abc.abstractmethod - def configure(self, configs: Any) -> Any: ... - @abc.abstractmethod - def close(self) -> Any: ... diff --git a/stubs/kafka/metrics/quota.pyi b/stubs/kafka/metrics/quota.pyi deleted file mode 100644 index 7c45af7e..00000000 --- a/stubs/kafka/metrics/quota.pyi +++ /dev/null @@ -1,15 +0,0 @@ -from typing import Any - -class Quota: - def __init__(self, bound: Any, is_upper: Any) -> None: ... - @staticmethod - def upper_bound(upper_bound: Any): ... - @staticmethod - def lower_bound(lower_bound: Any): ... - def is_upper_bound(self): ... - @property - def bound(self): ... - def is_acceptable(self, value: Any): ... - def __hash__(self) -> Any: ... - def __eq__(self, other: Any) -> Any: ... - def __ne__(self, other: Any) -> Any: ... diff --git a/stubs/kafka/metrics/stat.pyi b/stubs/kafka/metrics/stat.pyi deleted file mode 100644 index 215e6fb1..00000000 --- a/stubs/kafka/metrics/stat.pyi +++ /dev/null @@ -1,7 +0,0 @@ -import abc -from typing import Any - -class AbstractStat(metaclass=abc.ABCMeta): - __metaclass__: Any = ... - @abc.abstractmethod - def record(self, config: Any, value: Any, time_ms: Any) -> Any: ... 
diff --git a/stubs/kafka/metrics/stats/__init__.pyi b/stubs/kafka/metrics/stats/__init__.pyi deleted file mode 100644 index c81d71aa..00000000 --- a/stubs/kafka/metrics/stats/__init__.pyi +++ /dev/null @@ -1,10 +0,0 @@ -from kafka.metrics.stats.avg import Avg as Avg -from kafka.metrics.stats.count import Count as Count -from kafka.metrics.stats.histogram import Histogram as Histogram -from kafka.metrics.stats.max_stat import Max as Max -from kafka.metrics.stats.min_stat import Min as Min -from kafka.metrics.stats.percentile import Percentile as Percentile -from kafka.metrics.stats.percentiles import Percentiles as Percentiles -from kafka.metrics.stats.rate import Rate as Rate -from kafka.metrics.stats.sensor import Sensor as Sensor -from kafka.metrics.stats.total import Total as Total diff --git a/stubs/kafka/metrics/stats/avg.pyi b/stubs/kafka/metrics/stats/avg.pyi deleted file mode 100644 index f4472b2b..00000000 --- a/stubs/kafka/metrics/stats/avg.pyi +++ /dev/null @@ -1,7 +0,0 @@ -from kafka.metrics.stats.sampled_stat import AbstractSampledStat as AbstractSampledStat -from typing import Any - -class Avg(AbstractSampledStat): - def __init__(self) -> None: ... - def update(self, sample: Any, config: Any, value: Any, now: Any) -> None: ... - def combine(self, samples: Any, config: Any, now: Any): ... diff --git a/stubs/kafka/metrics/stats/count.pyi b/stubs/kafka/metrics/stats/count.pyi deleted file mode 100644 index 6c473c0e..00000000 --- a/stubs/kafka/metrics/stats/count.pyi +++ /dev/null @@ -1,7 +0,0 @@ -from kafka.metrics.stats.sampled_stat import AbstractSampledStat as AbstractSampledStat -from typing import Any - -class Count(AbstractSampledStat): - def __init__(self) -> None: ... - def update(self, sample: Any, config: Any, value: Any, now: Any) -> None: ... - def combine(self, samples: Any, config: Any, now: Any): ... diff --git a/stubs/kafka/metrics/stats/histogram.pyi b/stubs/kafka/metrics/stats/histogram.pyi deleted file mode 100644 index 610640d3..00000000 --- a/stubs/kafka/metrics/stats/histogram.pyi +++ /dev/null @@ -1,21 +0,0 @@ -from typing import Any - -class Histogram: - def __init__(self, bin_scheme: Any) -> None: ... - def record(self, value: Any) -> None: ... - def value(self, quantile: Any): ... - @property - def counts(self): ... - def clear(self) -> None: ... - class ConstantBinScheme: - def __init__(self, bins: Any, min_val: Any, max_val: Any) -> None: ... - @property - def bins(self): ... - def from_bin(self, b: Any): ... - def to_bin(self, x: Any): ... - class LinearBinScheme: - def __init__(self, num_bins: Any, max_val: Any) -> None: ... - @property - def bins(self): ... - def from_bin(self, b: Any): ... - def to_bin(self, x: Any): ... diff --git a/stubs/kafka/metrics/stats/max_stat.pyi b/stubs/kafka/metrics/stats/max_stat.pyi deleted file mode 100644 index 62ec9c75..00000000 --- a/stubs/kafka/metrics/stats/max_stat.pyi +++ /dev/null @@ -1,7 +0,0 @@ -from kafka.metrics.stats.sampled_stat import AbstractSampledStat as AbstractSampledStat -from typing import Any - -class Max(AbstractSampledStat): - def __init__(self) -> None: ... - def update(self, sample: Any, config: Any, value: Any, now: Any) -> None: ... - def combine(self, samples: Any, config: Any, now: Any): ... 
diff --git a/stubs/kafka/metrics/stats/min_stat.pyi b/stubs/kafka/metrics/stats/min_stat.pyi deleted file mode 100644 index a286eeb9..00000000 --- a/stubs/kafka/metrics/stats/min_stat.pyi +++ /dev/null @@ -1,7 +0,0 @@ -from kafka.metrics.stats.sampled_stat import AbstractSampledStat as AbstractSampledStat -from typing import Any - -class Min(AbstractSampledStat): - def __init__(self) -> None: ... - def update(self, sample: Any, config: Any, value: Any, now: Any) -> None: ... - def combine(self, samples: Any, config: Any, now: Any): ... diff --git a/stubs/kafka/metrics/stats/percentile.pyi b/stubs/kafka/metrics/stats/percentile.pyi deleted file mode 100644 index d33618c7..00000000 --- a/stubs/kafka/metrics/stats/percentile.pyi +++ /dev/null @@ -1,8 +0,0 @@ -from typing import Any - -class Percentile: - def __init__(self, metric_name: Any, percentile: Any) -> None: ... - @property - def name(self): ... - @property - def percentile(self): ... diff --git a/stubs/kafka/metrics/stats/percentiles.pyi b/stubs/kafka/metrics/stats/percentiles.pyi deleted file mode 100644 index 5e142167..00000000 --- a/stubs/kafka/metrics/stats/percentiles.pyi +++ /dev/null @@ -1,21 +0,0 @@ -from kafka.metrics import AnonMeasurable as AnonMeasurable, NamedMeasurable as NamedMeasurable -from kafka.metrics.compound_stat import AbstractCompoundStat as AbstractCompoundStat -from kafka.metrics.stats import Histogram as Histogram -from kafka.metrics.stats.sampled_stat import AbstractSampledStat as AbstractSampledStat -from typing import Any, Optional - -class BucketSizing: - CONSTANT: int = ... - LINEAR: int = ... - -class Percentiles(AbstractSampledStat, AbstractCompoundStat): - bin_scheme: Any = ... - def __init__(self, size_in_bytes: Any, bucketing: Any, max_val: Any, min_val: float = ..., percentiles: Optional[Any] = ...) -> None: ... - def stats(self): ... - def value(self, config: Any, now: Any, quantile: Any): ... - def combine(self, samples: Any, config: Any, now: Any): ... - def new_sample(self, time_ms: Any): ... - def update(self, sample: Any, config: Any, value: Any, time_ms: Any) -> None: ... - class HistogramSample(AbstractSampledStat.Sample): - histogram: Any = ... - def __init__(self, scheme: Any, now: Any) -> None: ... diff --git a/stubs/kafka/metrics/stats/rate.pyi b/stubs/kafka/metrics/stats/rate.pyi deleted file mode 100644 index 5c1cedb5..00000000 --- a/stubs/kafka/metrics/stats/rate.pyi +++ /dev/null @@ -1,27 +0,0 @@ -from kafka.metrics.measurable_stat import AbstractMeasurableStat as AbstractMeasurableStat -from kafka.metrics.stats.sampled_stat import AbstractSampledStat as AbstractSampledStat -from typing import Any, Optional - -class TimeUnit: - NANOSECONDS: Any = ... - MICROSECONDS: Any = ... - MILLISECONDS: Any = ... - SECONDS: Any = ... - MINUTES: Any = ... - HOURS: Any = ... - DAYS: Any = ... - @staticmethod - def get_name(time_unit: Any): ... - -class Rate(AbstractMeasurableStat): - def __init__(self, time_unit: Any = ..., sampled_stat: Optional[Any] = ...) -> None: ... - def unit_name(self): ... - def record(self, config: Any, value: Any, time_ms: Any) -> None: ... - def measure(self, config: Any, now: Any): ... - def window_size(self, config: Any, now: Any): ... - def convert(self, time_ms: Any): ... - -class SampledTotal(AbstractSampledStat): - def __init__(self, initial_value: Optional[Any] = ...) -> None: ... - def update(self, sample: Any, config: Any, value: Any, time_ms: Any) -> None: ... - def combine(self, samples: Any, config: Any, now: Any): ... 
diff --git a/stubs/kafka/metrics/stats/sampled_stat.pyi b/stubs/kafka/metrics/stats/sampled_stat.pyi deleted file mode 100644 index 61e0ed5c..00000000 --- a/stubs/kafka/metrics/stats/sampled_stat.pyi +++ /dev/null @@ -1,25 +0,0 @@ -import abc -from kafka.metrics.measurable_stat import AbstractMeasurableStat as AbstractMeasurableStat -from typing import Any - -class AbstractSampledStat(AbstractMeasurableStat, metaclass=abc.ABCMeta): - __metaclass__: Any = ... - def __init__(self, initial_value: Any) -> None: ... - @abc.abstractmethod - def update(self, sample: Any, config: Any, value: Any, time_ms: Any) -> Any: ... - @abc.abstractmethod - def combine(self, samples: Any, config: Any, now: Any) -> Any: ... - def record(self, config: Any, value: Any, time_ms: Any) -> None: ... - def new_sample(self, time_ms: Any): ... - def measure(self, config: Any, now: Any): ... - def current(self, time_ms: Any): ... - def oldest(self, now: Any): ... - def purge_obsolete_samples(self, config: Any, now: Any) -> None: ... - class Sample: - initial_value: Any = ... - event_count: int = ... - last_window_ms: Any = ... - value: Any = ... - def __init__(self, initial_value: Any, now: Any) -> None: ... - def reset(self, now: Any) -> None: ... - def is_complete(self, time_ms: Any, config: Any): ... diff --git a/stubs/kafka/metrics/stats/sensor.pyi b/stubs/kafka/metrics/stats/sensor.pyi deleted file mode 100644 index e4f9fcb3..00000000 --- a/stubs/kafka/metrics/stats/sensor.pyi +++ /dev/null @@ -1,14 +0,0 @@ -from kafka.errors import QuotaViolationError as QuotaViolationError -from kafka.metrics import KafkaMetric as KafkaMetric -from typing import Any, Optional - -class Sensor: - def __init__(self, registry: Any, name: Any, parents: Any, config: Any, inactive_sensor_expiration_time_seconds: Any) -> None: ... - @property - def name(self): ... - @property - def metrics(self): ... - def record(self, value: float = ..., time_ms: Optional[Any] = ...) -> None: ... - def add_compound(self, compound_stat: Any, config: Optional[Any] = ...) -> None: ... - def add(self, metric_name: Any, stat: Any, config: Optional[Any] = ...) -> None: ... - def has_expired(self): ... diff --git a/stubs/kafka/metrics/stats/total.pyi b/stubs/kafka/metrics/stats/total.pyi deleted file mode 100644 index ef0714ec..00000000 --- a/stubs/kafka/metrics/stats/total.pyi +++ /dev/null @@ -1,7 +0,0 @@ -from kafka.metrics.measurable_stat import AbstractMeasurableStat as AbstractMeasurableStat -from typing import Any - -class Total(AbstractMeasurableStat): - def __init__(self, value: float = ...) -> None: ... - def record(self, config: Any, value: Any, now: Any) -> None: ... - def measure(self, config: Any, now: Any): ... diff --git a/stubs/kafka/oauth/__init__.pyi b/stubs/kafka/oauth/__init__.pyi deleted file mode 100644 index 1280708a..00000000 --- a/stubs/kafka/oauth/__init__.pyi +++ /dev/null @@ -1 +0,0 @@ -from kafka.oauth.abstract import AbstractTokenProvider as AbstractTokenProvider diff --git a/stubs/kafka/oauth/abstract.pyi b/stubs/kafka/oauth/abstract.pyi deleted file mode 100644 index 56fa8a03..00000000 --- a/stubs/kafka/oauth/abstract.pyi +++ /dev/null @@ -1,10 +0,0 @@ -import abc -from typing import Any - -ABC: Any - -class AbstractTokenProvider(ABC, metaclass=abc.ABCMeta): - def __init__(self, **config: Any) -> None: ... - @abc.abstractmethod - def token(self) -> Any: ... - def extensions(self): ... 
diff --git a/stubs/kafka/partitioner/__init__.pyi b/stubs/kafka/partitioner/__init__.pyi deleted file mode 100644 index 5282ae70..00000000 --- a/stubs/kafka/partitioner/__init__.pyi +++ /dev/null @@ -1 +0,0 @@ -from kafka.partitioner.default import DefaultPartitioner as DefaultPartitioner, murmur2 as murmur2 diff --git a/stubs/kafka/partitioner/default.pyi b/stubs/kafka/partitioner/default.pyi deleted file mode 100644 index 7b1a6328..00000000 --- a/stubs/kafka/partitioner/default.pyi +++ /dev/null @@ -1,7 +0,0 @@ -from typing import Any - -class DefaultPartitioner: - @classmethod - def __call__(cls, key: Any, all_partitions: Any, available: Any): ... - -def murmur2(data: Any): ... diff --git a/stubs/kafka/producer/__init__.pyi b/stubs/kafka/producer/__init__.pyi deleted file mode 100644 index be14dee1..00000000 --- a/stubs/kafka/producer/__init__.pyi +++ /dev/null @@ -1 +0,0 @@ -from kafka.producer.kafka import KafkaProducer as KafkaProducer diff --git a/stubs/kafka/producer/buffer.pyi b/stubs/kafka/producer/buffer.pyi deleted file mode 100644 index 7133f2bf..00000000 --- a/stubs/kafka/producer/buffer.pyi +++ /dev/null @@ -1,9 +0,0 @@ -from kafka.metrics.stats import Rate as Rate -from typing import Any, Optional - -class SimpleBufferPool: - wait_time: Any = ... - def __init__(self, memory: Any, poolable_size: Any, metrics: Optional[Any] = ..., metric_group_prefix: str = ...) -> None: ... - def allocate(self, size: Any, max_time_to_block_ms: Any): ... - def deallocate(self, buf: Any) -> None: ... - def queued(self): ... diff --git a/stubs/kafka/producer/future.pyi b/stubs/kafka/producer/future.pyi deleted file mode 100644 index 48ed0c8e..00000000 --- a/stubs/kafka/producer/future.pyi +++ /dev/null @@ -1,17 +0,0 @@ -from collections import namedtuple -from kafka.future import Future as Future -from typing import Any, Optional - -class FutureProduceResult(Future): - topic_partition: Any = ... - def __init__(self, topic_partition: Any) -> None: ... - def success(self, value: Any): ... - def failure(self, error: Any): ... - def wait(self, timeout: Optional[Any] = ...): ... - -class FutureRecordMetadata(Future): - args: Any = ... - def __init__(self, produce_future: Any, relative_offset: Any, timestamp_ms: Any, checksum: Any, serialized_key_size: Any, serialized_value_size: Any, serialized_header_size: Any) -> None: ... - def get(self, timeout: Optional[Any] = ...): ... 
- -RecordMetadata = namedtuple('RecordMetadata', ['topic', 'partition', 'topic_partition', 'offset', 'timestamp', 'log_start_offset', 'checksum', 'serialized_key_size', 'serialized_value_size', 'serialized_header_size']) diff --git a/stubs/kafka/producer/kafka.pyi b/stubs/kafka/producer/kafka.pyi deleted file mode 100644 index 2a9f0ad8..00000000 --- a/stubs/kafka/producer/kafka.pyi +++ /dev/null @@ -1,27 +0,0 @@ -from kafka.client_async import KafkaClient as KafkaClient -from kafka.codec import has_gzip as has_gzip, has_lz4 as has_lz4, has_snappy as has_snappy -from kafka.metrics import MetricConfig as MetricConfig, Metrics as Metrics -from kafka.partitioner.default import DefaultPartitioner as DefaultPartitioner -from kafka.producer.future import FutureProduceResult as FutureProduceResult, FutureRecordMetadata as FutureRecordMetadata -from kafka.producer.record_accumulator import AtomicInteger as AtomicInteger, RecordAccumulator as RecordAccumulator -from kafka.producer.sender import Sender as Sender -from kafka.record.default_records import DefaultRecordBatchBuilder as DefaultRecordBatchBuilder -from kafka.record.legacy_records import LegacyRecordBatchBuilder as LegacyRecordBatchBuilder -from kafka.serializer import Serializer as Serializer -from kafka.structs import TopicPartition as TopicPartition -from typing import Any, Optional - -log: Any -PRODUCER_CLIENT_ID_SEQUENCE: Any - -class KafkaProducer: - DEFAULT_CONFIG: Any = ... - config: Any = ... - def __init__(self, **configs: Any) -> None: ... - def bootstrap_connected(self): ... - def __del__(self) -> None: ... - def close(self, timeout: Optional[Any] = ...) -> None: ... - def partitions_for(self, topic: Any): ... - def send(self, topic: Any, value: Optional[Any] = ..., key: Optional[Any] = ..., headers: Optional[Any] = ..., partition: Optional[Any] = ..., timestamp_ms: Optional[Any] = ...): ... - def flush(self, timeout: Optional[Any] = ...) -> None: ... - def metrics(self, raw: bool = ...): ... diff --git a/stubs/kafka/producer/record_accumulator.pyi b/stubs/kafka/producer/record_accumulator.pyi deleted file mode 100644 index 6517dbe1..00000000 --- a/stubs/kafka/producer/record_accumulator.pyi +++ /dev/null @@ -1,56 +0,0 @@ -from kafka.producer.buffer import SimpleBufferPool as SimpleBufferPool -from kafka.producer.future import FutureProduceResult as FutureProduceResult, FutureRecordMetadata as FutureRecordMetadata -from kafka.record.memory_records import MemoryRecordsBuilder as MemoryRecordsBuilder -from kafka.structs import TopicPartition as TopicPartition -from typing import Any, Optional - -log: Any - -class AtomicInteger: - def __init__(self, val: int = ...) -> None: ... - def increment(self): ... - def decrement(self): ... - def get(self): ... - -class ProducerBatch: - max_record_size: int = ... - created: Any = ... - drained: Any = ... - attempts: int = ... - last_attempt: Any = ... - last_append: Any = ... - records: Any = ... - topic_partition: Any = ... - produce_future: Any = ... - def __init__(self, tp: Any, records: Any, buffer: Any) -> None: ... - @property - def record_count(self): ... - def try_append(self, timestamp_ms: Any, key: Any, value: Any, headers: Any): ... - def done(self, base_offset: Optional[Any] = ..., timestamp_ms: Optional[Any] = ..., exception: Optional[Any] = ..., log_start_offset: Optional[int] = ..., global_error: Optional[str] = ...) -> None: ... - def maybe_expire(self, request_timeout_ms: Any, retry_backoff_ms: Any, linger_ms: Any, is_full: Any): ... - def in_retry(self): ... 
- def set_retry(self) -> None: ... - def buffer(self): ... - -class RecordAccumulator: - DEFAULT_CONFIG: Any = ... - config: Any = ... - muted: Any = ... - def __init__(self, **configs: Any) -> None: ... - def append(self, tp: Any, timestamp_ms: Any, key: Any, value: Any, headers: Any, max_time_to_block_ms: Any, estimated_size: int = ...): ... - def abort_expired_batches(self, request_timeout_ms: Any, cluster: Any): ... - def reenqueue(self, batch: Any) -> None: ... - def ready(self, cluster: Any): ... - def has_unsent(self): ... - def drain(self, cluster: Any, nodes: Any, max_size: Any): ... - def deallocate(self, batch: Any) -> None: ... - def begin_flush(self) -> None: ... - def await_flush_completion(self, timeout: Optional[Any] = ...) -> None: ... - def abort_incomplete_batches(self) -> None: ... - def close(self) -> None: ... - -class IncompleteProducerBatches: - def __init__(self) -> None: ... - def add(self, batch: Any): ... - def remove(self, batch: Any): ... - def all(self): ... diff --git a/stubs/kafka/producer/sender.pyi b/stubs/kafka/producer/sender.pyi deleted file mode 100644 index c458d472..00000000 --- a/stubs/kafka/producer/sender.pyi +++ /dev/null @@ -1,40 +0,0 @@ -import threading -from kafka.metrics.measurable import AnonMeasurable as AnonMeasurable -from kafka.metrics.stats import Avg as Avg, Max as Max, Rate as Rate -from kafka.protocol.produce import ProduceRequest as ProduceRequest -from kafka.structs import TopicPartition as TopicPartition -from typing import Any, Optional - -log: Any - -class Sender(threading.Thread): - DEFAULT_CONFIG: Any = ... - config: Any = ... - name: Any = ... - def __init__(self, client: Any, metadata: Any, accumulator: Any, metrics: Any, **configs: Any) -> None: ... - def run(self) -> None: ... - def run_once(self) -> None: ... - def initiate_close(self) -> None: ... - def force_close(self) -> None: ... - def add_topic(self, topic: Any) -> None: ... - def wakeup(self) -> None: ... - def bootstrap_connected(self): ... - -class SenderMetrics: - metrics: Any = ... - batch_size_sensor: Any = ... - compression_rate_sensor: Any = ... - queue_time_sensor: Any = ... - produce_throttle_time_sensor: Any = ... - records_per_request_sensor: Any = ... - byte_rate_sensor: Any = ... - retry_sensor: Any = ... - error_sensor: Any = ... - max_record_size_sensor: Any = ... - def __init__(self, metrics: Any, client: Any, metadata: Any): ... - def add_metric(self, metric_name: Any, measurable: Any, group_name: str = ..., description: Optional[Any] = ..., tags: Optional[Any] = ..., sensor_name: Optional[Any] = ...) -> None: ... - def maybe_register_topic_metrics(self, topic: Any): ... - def update_produce_request_metrics(self, batches_map: Any) -> None: ... - def record_retries(self, topic: Any, count: Any) -> None: ... - def record_errors(self, topic: Any, count: Any) -> None: ... - def record_throttle_time(self, throttle_time_ms: Any, node: Optional[Any] = ...) -> None: ... diff --git a/stubs/kafka/protocol/__init__.pyi b/stubs/kafka/protocol/__init__.pyi deleted file mode 100644 index e07f07f3..00000000 --- a/stubs/kafka/protocol/__init__.pyi +++ /dev/null @@ -1,3 +0,0 @@ -from typing import Any - -API_KEYS: Any diff --git a/stubs/kafka/protocol/abstract.pyi b/stubs/kafka/protocol/abstract.pyi deleted file mode 100644 index 512edf36..00000000 --- a/stubs/kafka/protocol/abstract.pyi +++ /dev/null @@ -1,11 +0,0 @@ -import abc -from typing import Any - -class AbstractType(metaclass=abc.ABCMeta): - __metaclass__: Any = ... 
- @abc.abstractmethod - def encode(cls, value: Any) -> Any: ... - @abc.abstractmethod - def decode(cls, data: Any) -> Any: ... - @classmethod - def repr(cls, value: Any): ... diff --git a/stubs/kafka/protocol/admin.pyi b/stubs/kafka/protocol/admin.pyi deleted file mode 100644 index d5bf020a..00000000 --- a/stubs/kafka/protocol/admin.pyi +++ /dev/null @@ -1,427 +0,0 @@ -from kafka.protocol.api import Request as Request, Response as Response -from kafka.protocol.types import Array as Array, Boolean as Boolean, Bytes as Bytes, Int16 as Int16, Int32 as Int32, Int64 as Int64, Int8 as Int8, Schema as Schema, String as String -from typing import Any - -class ApiVersionResponse_v0(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class ApiVersionResponse_v1(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class ApiVersionResponse_v2(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class ApiVersionRequest_v0(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class ApiVersionRequest_v1(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class ApiVersionRequest_v2(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -ApiVersionRequest: Any -ApiVersionResponse: Any - -class CreateTopicsResponse_v0(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class CreateTopicsResponse_v1(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class CreateTopicsResponse_v2(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class CreateTopicsResponse_v3(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class CreateTopicsRequest_v0(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class CreateTopicsRequest_v1(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class CreateTopicsRequest_v2(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class CreateTopicsRequest_v3(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -CreateTopicsRequest: Any -CreateTopicsResponse: Any - -class DeleteTopicsResponse_v0(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class DeleteTopicsResponse_v1(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class DeleteTopicsResponse_v2(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class DeleteTopicsResponse_v3(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class DeleteTopicsRequest_v0(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class DeleteTopicsRequest_v1(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class DeleteTopicsRequest_v2(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class DeleteTopicsRequest_v3(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... 
- -DeleteTopicsRequest: Any -DeleteTopicsResponse: Any - -class ListGroupsResponse_v0(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class ListGroupsResponse_v1(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class ListGroupsResponse_v2(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class ListGroupsRequest_v0(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class ListGroupsRequest_v1(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class ListGroupsRequest_v2(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -ListGroupsRequest: Any -ListGroupsResponse: Any - -class DescribeGroupsResponse_v0(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class DescribeGroupsResponse_v1(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class DescribeGroupsResponse_v2(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class DescribeGroupsResponse_v3(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class DescribeGroupsRequest_v0(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class DescribeGroupsRequest_v1(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class DescribeGroupsRequest_v2(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class DescribeGroupsRequest_v3(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -DescribeGroupsRequest: Any -DescribeGroupsResponse: Any - -class SaslHandShakeResponse_v0(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class SaslHandShakeResponse_v1(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class SaslHandShakeRequest_v0(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class SaslHandShakeRequest_v1(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -SaslHandShakeRequest: Any -SaslHandShakeResponse: Any - -class DescribeAclsResponse_v0(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class DescribeAclsResponse_v1(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class DescribeAclsRequest_v0(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class DescribeAclsRequest_v1(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -DescribeAclsRequest: Any -DescribeAclsResponse: Any - -class CreateAclsResponse_v0(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class CreateAclsResponse_v1(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class CreateAclsRequest_v0(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class CreateAclsRequest_v1(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... 
- -CreateAclsRequest: Any -CreateAclsResponse: Any - -class DeleteAclsResponse_v0(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class DeleteAclsResponse_v1(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class DeleteAclsRequest_v0(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class DeleteAclsRequest_v1(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -DeleteAclsRequest: Any -DeleteAclsResponse: Any - -class AlterConfigsResponse_v0(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class AlterConfigsResponse_v1(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class AlterConfigsRequest_v0(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class AlterConfigsRequest_v1(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -AlterConfigsRequest: Any -AlterConfigsResponse: Any - -class DescribeConfigsResponse_v0(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class DescribeConfigsResponse_v1(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class DescribeConfigsResponse_v2(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class DescribeConfigsRequest_v0(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class DescribeConfigsRequest_v1(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class DescribeConfigsRequest_v2(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -DescribeConfigsRequest: Any -DescribeConfigsResponse: Any - -class SaslAuthenticateResponse_v0(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class SaslAuthenticateResponse_v1(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class SaslAuthenticateRequest_v0(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class SaslAuthenticateRequest_v1(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -SaslAuthenticateRequest: Any -SaslAuthenticateResponse: Any - -class CreatePartitionsResponse_v0(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class CreatePartitionsResponse_v1(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class CreatePartitionsRequest_v0(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class CreatePartitionsRequest_v1(Request): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - RESPONSE_TYPE: Any = ... - -CreatePartitionsRequest: Any -CreatePartitionsResponse: Any diff --git a/stubs/kafka/protocol/api.pyi b/stubs/kafka/protocol/api.pyi deleted file mode 100644 index abd5c1ad..00000000 --- a/stubs/kafka/protocol/api.pyi +++ /dev/null @@ -1,38 +0,0 @@ -import abc -from kafka.protocol.struct import Struct as Struct -from kafka.protocol.types import Array as Array, Int16 as Int16, Int32 as Int32, Schema as Schema, String as String -from typing import Any - -class RequestHeader(Struct): - SCHEMA: Any = ... 
- def __init__(self, request: Any, correlation_id: int = ..., client_id: str = ...) -> None: ... - -class Request(Struct, metaclass=abc.ABCMeta): - __metaclass__: Any = ... - @property - @abc.abstractmethod - def API_KEY(self) -> Any: ... - @property - @abc.abstractmethod - def API_VERSION(self) -> Any: ... - @property - @abc.abstractmethod - def SCHEMA(self) -> Any: ... - @property - @abc.abstractmethod - def RESPONSE_TYPE(self) -> Any: ... - def expect_response(self): ... - def to_object(self): ... - -class Response(Struct, metaclass=abc.ABCMeta): - __metaclass__: Any = ... - @property - @abc.abstractmethod - def API_KEY(self) -> Any: ... - @property - @abc.abstractmethod - def API_VERSION(self) -> Any: ... - @property - @abc.abstractmethod - def SCHEMA(self) -> Any: ... - def to_object(self): ... diff --git a/stubs/kafka/protocol/commit.pyi b/stubs/kafka/protocol/commit.pyi deleted file mode 100644 index 5dee04f2..00000000 --- a/stubs/kafka/protocol/commit.pyi +++ /dev/null @@ -1,124 +0,0 @@ -from kafka.protocol.api import Request as Request, Response as Response -from kafka.protocol.types import Array as Array, Int16 as Int16, Int32 as Int32, Int64 as Int64, Int8 as Int8, Schema as Schema, String as String -from typing import Any - -class OffsetCommitResponse_v0(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class OffsetCommitResponse_v1(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class OffsetCommitResponse_v2(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class OffsetCommitResponse_v3(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class OffsetCommitRequest_v0(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class OffsetCommitRequest_v1(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class OffsetCommitRequest_v2(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - DEFAULT_GENERATION_ID: int = ... - DEFAULT_RETENTION_TIME: int = ... - -class OffsetCommitRequest_v3(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -OffsetCommitRequest: Any -OffsetCommitResponse: Any - -class OffsetFetchResponse_v0(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class OffsetFetchResponse_v1(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class OffsetFetchResponse_v2(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class OffsetFetchResponse_v3(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class OffsetFetchRequest_v0(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class OffsetFetchRequest_v1(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class OffsetFetchRequest_v2(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class OffsetFetchRequest_v3(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -OffsetFetchRequest: Any -OffsetFetchResponse: Any - -class GroupCoordinatorResponse_v0(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... 
- -class GroupCoordinatorResponse_v1(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class GroupCoordinatorRequest_v0(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class GroupCoordinatorRequest_v1(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -GroupCoordinatorRequest: Any -GroupCoordinatorResponse: Any diff --git a/stubs/kafka/protocol/fetch.pyi b/stubs/kafka/protocol/fetch.pyi deleted file mode 100644 index 0f246615..00000000 --- a/stubs/kafka/protocol/fetch.pyi +++ /dev/null @@ -1,83 +0,0 @@ -from kafka.protocol.api import Request as Request, Response as Response -from kafka.protocol.types import Array as Array, Bytes as Bytes, Int16 as Int16, Int32 as Int32, Int64 as Int64, Int8 as Int8, Schema as Schema, String as String -from typing import Any - -class FetchResponse_v0(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class FetchResponse_v1(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class FetchResponse_v2(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class FetchResponse_v3(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class FetchResponse_v4(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class FetchResponse_v5(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class FetchResponse_v6(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class FetchRequest_v0(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class FetchRequest_v1(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class FetchRequest_v2(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class FetchRequest_v3(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class FetchRequest_v4(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class FetchRequest_v5(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class FetchRequest_v6(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -FetchRequest: Any -FetchResponse: Any diff --git a/stubs/kafka/protocol/frame.pyi b/stubs/kafka/protocol/frame.pyi deleted file mode 100644 index cbd99a49..00000000 --- a/stubs/kafka/protocol/frame.pyi +++ /dev/null @@ -1,8 +0,0 @@ -from typing import Any, Optional - -class KafkaBytes(bytearray): - def __init__(self, size: Any) -> None: ... - def read(self, nbytes: Optional[Any] = ...): ... - def write(self, data: Any) -> None: ... - def seek(self, idx: Any) -> None: ... - def tell(self): ... 
diff --git a/stubs/kafka/protocol/group.pyi b/stubs/kafka/protocol/group.pyi deleted file mode 100644 index 1becdb65..00000000 --- a/stubs/kafka/protocol/group.pyi +++ /dev/null @@ -1,124 +0,0 @@ -from kafka.protocol.api import Request as Request, Response as Response -from kafka.protocol.struct import Struct as Struct -from kafka.protocol.types import Array as Array, Bytes as Bytes, Int16 as Int16, Int32 as Int32, Schema as Schema, String as String -from typing import Any - -class JoinGroupResponse_v0(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class JoinGroupResponse_v1(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class JoinGroupResponse_v2(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class JoinGroupRequest_v0(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - UNKNOWN_MEMBER_ID: str = ... - -class JoinGroupRequest_v1(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - UNKNOWN_MEMBER_ID: str = ... - -class JoinGroupRequest_v2(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - UNKNOWN_MEMBER_ID: str = ... - -JoinGroupRequest: Any -JoinGroupResponse: Any - -class ProtocolMetadata(Struct): - SCHEMA: Any = ... - -class SyncGroupResponse_v0(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class SyncGroupResponse_v1(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class SyncGroupRequest_v0(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class SyncGroupRequest_v1(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -SyncGroupRequest: Any -SyncGroupResponse: Any - -class MemberAssignment(Struct): - SCHEMA: Any = ... - -class HeartbeatResponse_v0(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class HeartbeatResponse_v1(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class HeartbeatRequest_v0(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class HeartbeatRequest_v1(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -HeartbeatRequest: Any -HeartbeatResponse: Any - -class LeaveGroupResponse_v0(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class LeaveGroupResponse_v1(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class LeaveGroupRequest_v0(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class LeaveGroupRequest_v1(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... 
- -LeaveGroupRequest: Any -LeaveGroupResponse: Any diff --git a/stubs/kafka/protocol/message.pyi b/stubs/kafka/protocol/message.pyi deleted file mode 100644 index ce67c64b..00000000 --- a/stubs/kafka/protocol/message.pyi +++ /dev/null @@ -1,45 +0,0 @@ -from kafka.codec import gzip_decode as gzip_decode, has_gzip as has_gzip, has_lz4 as has_lz4, has_snappy as has_snappy, lz4_decode as lz4_decode, lz4_decode_old_kafka as lz4_decode_old_kafka, snappy_decode as snappy_decode -from kafka.protocol.frame import KafkaBytes as KafkaBytes -from kafka.protocol.struct import Struct as Struct -from kafka.protocol.types import AbstractType as AbstractType, Bytes as Bytes, Int32 as Int32, Int64 as Int64, Int8 as Int8, Schema as Schema -from kafka.util import WeakMethod as WeakMethod, crc32 as crc32 -from typing import Any, Optional - -class Message(Struct): - SCHEMAS: Any = ... - SCHEMA: Any = ... - CODEC_MASK: int = ... - CODEC_GZIP: int = ... - CODEC_SNAPPY: int = ... - CODEC_LZ4: int = ... - CODEC_ZSTD: int = ... - TIMESTAMP_TYPE_MASK: int = ... - HEADER_SIZE: int = ... - timestamp: Any = ... - crc: Any = ... - magic: Any = ... - attributes: Any = ... - key: Any = ... - value: Any = ... - encode: Any = ... - def __init__(self, value: Any, key: Optional[Any] = ..., magic: int = ..., attributes: int = ..., crc: int = ..., timestamp: Optional[Any] = ...) -> None: ... - @property - def timestamp_type(self): ... - @classmethod - def decode(cls, data: Any): ... - def validate_crc(self): ... - def is_compressed(self): ... - def decompress(self): ... - def __hash__(self) -> Any: ... - -class PartialMessage(bytes): ... - -class MessageSet(AbstractType): - ITEM: Any = ... - HEADER_SIZE: int = ... - @classmethod - def encode(cls, items: Any, prepend_size: bool = ...): ... - @classmethod - def decode(cls, data: Any, bytes_to_read: Optional[Any] = ...): ... - @classmethod - def repr(cls, messages: Any): ... diff --git a/stubs/kafka/protocol/metadata.pyi b/stubs/kafka/protocol/metadata.pyi deleted file mode 100644 index 4ee028ba..00000000 --- a/stubs/kafka/protocol/metadata.pyi +++ /dev/null @@ -1,83 +0,0 @@ -from kafka.protocol.api import Request as Request, Response as Response -from kafka.protocol.types import Array as Array, Boolean as Boolean, Int16 as Int16, Int32 as Int32, Schema as Schema, String as String -from typing import Any - -class MetadataResponse_v0(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class MetadataResponse_v1(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class MetadataResponse_v2(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class MetadataResponse_v3(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class MetadataResponse_v4(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class MetadataResponse_v5(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class MetadataRequest_v0(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - ALL_TOPICS: Any = ... - -class MetadataRequest_v1(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - ALL_TOPICS: int = ... - NO_TOPICS: Any = ... - -class MetadataRequest_v2(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - ALL_TOPICS: int = ... - NO_TOPICS: Any = ... 
- -class MetadataRequest_v3(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - ALL_TOPICS: int = ... - NO_TOPICS: Any = ... - -class MetadataRequest_v4(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - ALL_TOPICS: int = ... - NO_TOPICS: Any = ... - -class MetadataRequest_v5(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - ALL_TOPICS: int = ... - NO_TOPICS: Any = ... - -MetadataRequest: Any -MetadataResponse: Any diff --git a/stubs/kafka/protocol/offset.pyi b/stubs/kafka/protocol/offset.pyi deleted file mode 100644 index 290c403f..00000000 --- a/stubs/kafka/protocol/offset.pyi +++ /dev/null @@ -1,49 +0,0 @@ -from kafka.protocol.api import Request as Request, Response as Response -from kafka.protocol.types import Array as Array, Int16 as Int16, Int32 as Int32, Int64 as Int64, Int8 as Int8, Schema as Schema, String as String -from typing import Any - -UNKNOWN_OFFSET: int - -class OffsetResetStrategy: - LATEST: int = ... - EARLIEST: int = ... - NONE: int = ... - -class OffsetResponse_v0(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class OffsetResponse_v1(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class OffsetResponse_v2(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class OffsetRequest_v0(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - DEFAULTS: Any = ... - -class OffsetRequest_v1(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - DEFAULTS: Any = ... - -class OffsetRequest_v2(Request): - API_KEY: int = ... - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - DEFAULTS: Any = ... - -OffsetRequest: Any -OffsetResponse: Any diff --git a/stubs/kafka/protocol/parser.pyi b/stubs/kafka/protocol/parser.pyi deleted file mode 100644 index b985b356..00000000 --- a/stubs/kafka/protocol/parser.pyi +++ /dev/null @@ -1,15 +0,0 @@ -from kafka.protocol.api import RequestHeader as RequestHeader -from kafka.protocol.commit import GroupCoordinatorResponse as GroupCoordinatorResponse -from kafka.protocol.frame import KafkaBytes as KafkaBytes -from kafka.protocol.types import Int32 as Int32 -from typing import Any, Optional - -log: Any - -class KafkaProtocol: - in_flight_requests: Any = ... - bytes_to_send: Any = ... - def __init__(self, client_id: Optional[Any] = ..., api_version: Optional[Any] = ...) -> None: ... - def send_request(self, request: Any, correlation_id: Optional[Any] = ...): ... - def send_bytes(self): ... - def receive_bytes(self, data: Any): ... diff --git a/stubs/kafka/protocol/pickle.pyi b/stubs/kafka/protocol/pickle.pyi deleted file mode 100644 index e69de29b..00000000 diff --git a/stubs/kafka/protocol/produce.pyi b/stubs/kafka/protocol/produce.pyi deleted file mode 100644 index 4834399b..00000000 --- a/stubs/kafka/protocol/produce.pyi +++ /dev/null @@ -1,85 +0,0 @@ -import abc -from kafka.protocol.api import Request as Request, Response as Response -from kafka.protocol.types import Array as Array, Bytes as Bytes, Int16 as Int16, Int32 as Int32, Int64 as Int64, Schema as Schema, String as String -from typing import Any - -class ProduceResponse_v0(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class ProduceResponse_v1(Response): - API_KEY: int = ... 
- API_VERSION: int = ... - SCHEMA: Any = ... - -class ProduceResponse_v2(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class ProduceResponse_v3(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class ProduceResponse_v4(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class ProduceResponse_v5(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class ProduceResponse_v6(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class ProduceResponse_v7(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class ProduceResponse_v8(Response): - API_KEY: int = ... - API_VERSION: int = ... - SCHEMA: Any = ... - -class ProduceRequest(Request, metaclass=abc.ABCMeta): - API_KEY: int = ... - def expect_response(self): ... - -class ProduceRequest_v0(ProduceRequest): - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class ProduceRequest_v1(ProduceRequest): - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class ProduceRequest_v2(ProduceRequest): - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class ProduceRequest_v3(ProduceRequest): - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class ProduceRequest_v4(ProduceRequest): - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -class ProduceRequest_v5(ProduceRequest): - API_VERSION: int = ... - RESPONSE_TYPE: Any = ... - SCHEMA: Any = ... - -ProduceResponse: Any diff --git a/stubs/kafka/protocol/struct.pyi b/stubs/kafka/protocol/struct.pyi deleted file mode 100644 index c892760a..00000000 --- a/stubs/kafka/protocol/struct.pyi +++ /dev/null @@ -1,13 +0,0 @@ -from kafka.protocol.abstract import AbstractType as AbstractType -from typing import Any - -class Struct(AbstractType): - SCHEMA: Any = ... - def __init__(self, *args: Any, **kwargs: Any) -> None: ... - @classmethod - def encode(cls, item: Any): ... - @classmethod - def decode(cls, data: Any): ... - def get_item(self, name: Any): ... - def __hash__(self) -> Any: ... - def __eq__(self, other: Any) -> Any: ... diff --git a/stubs/kafka/protocol/types.pyi b/stubs/kafka/protocol/types.pyi deleted file mode 100644 index aa9e6557..00000000 --- a/stubs/kafka/protocol/types.pyi +++ /dev/null @@ -1,62 +0,0 @@ -from kafka.protocol.abstract import AbstractType as AbstractType -from typing import Any - -class Int8(AbstractType): - @classmethod - def encode(cls, value: Any): ... - @classmethod - def decode(cls, data: Any): ... - -class Int16(AbstractType): - @classmethod - def encode(cls, value: Any): ... - @classmethod - def decode(cls, data: Any): ... - -class Int32(AbstractType): - @classmethod - def encode(cls, value: Any): ... - @classmethod - def decode(cls, data: Any): ... - -class Int64(AbstractType): - @classmethod - def encode(cls, value: Any): ... - @classmethod - def decode(cls, data: Any): ... - -class String(AbstractType): - encoding: Any = ... - def __init__(self, encoding: str = ...) -> None: ... - def encode(self, value: Any): ... - def decode(self, data: Any): ... - -class Bytes(AbstractType): - @classmethod - def encode(cls, value: Any): ... - @classmethod - def decode(cls, data: Any): ... - @classmethod - def repr(cls, value: Any): ... - -class Boolean(AbstractType): - @classmethod - def encode(cls, value: Any): ... - @classmethod - def decode(cls, data: Any): ... 
- -class Schema(AbstractType): - def __init__(self, *fields: Any) -> None: ... - def encode(self, item: Any): ... - def decode(self, data: Any): ... - def __len__(self): ... - @classmethod - def repr(cls, value: Any): ... - -class Array(AbstractType): - array_of: Any = ... - def __init__(self, *array_of: Any) -> None: ... - def encode(self, items: Any): ... - def decode(self, data: Any): ... - @classmethod - def repr(cls, value: Any): ... diff --git a/stubs/kafka/record/__init__.pyi b/stubs/kafka/record/__init__.pyi deleted file mode 100644 index 47484282..00000000 --- a/stubs/kafka/record/__init__.pyi +++ /dev/null @@ -1 +0,0 @@ -from kafka.record.memory_records import MemoryRecords as MemoryRecords, MemoryRecordsBuilder as MemoryRecordsBuilder diff --git a/stubs/kafka/record/_crc32c.pyi b/stubs/kafka/record/_crc32c.pyi deleted file mode 100644 index 8dda7a89..00000000 --- a/stubs/kafka/record/_crc32c.pyi +++ /dev/null @@ -1,8 +0,0 @@ -from typing import Any - -CRC_TABLE: Any -CRC_INIT: int - -def crc_update(crc: Any, data: Any): ... -def crc_finalize(crc: Any): ... -def crc(data: Any): ... diff --git a/stubs/kafka/record/abc.pyi b/stubs/kafka/record/abc.pyi deleted file mode 100644 index 59248e1e..00000000 --- a/stubs/kafka/record/abc.pyi +++ /dev/null @@ -1,51 +0,0 @@ -import abc -from typing import Any, Optional - -class ABCRecord(metaclass=abc.ABCMeta): - __metaclass__: Any = ... - @property - @abc.abstractmethod - def offset(self) -> Any: ... - @property - @abc.abstractmethod - def timestamp(self) -> Any: ... - @property - @abc.abstractmethod - def timestamp_type(self) -> Any: ... - @property - @abc.abstractmethod - def key(self) -> Any: ... - @property - @abc.abstractmethod - def value(self) -> Any: ... - @property - @abc.abstractmethod - def checksum(self) -> Any: ... - @property - @abc.abstractmethod - def headers(self) -> Any: ... - -class ABCRecordBatchBuilder(metaclass=abc.ABCMeta): - __metaclass__: Any = ... - @abc.abstractmethod - def append(self, offset: Any, timestamp: Any, key: Any, value: Any, headers: Optional[Any] = ...) -> Any: ... - @abc.abstractmethod - def size_in_bytes(self, offset: Any, timestamp: Any, key: Any, value: Any, headers: Any) -> Any: ... - @abc.abstractmethod - def build(self) -> Any: ... - -class ABCRecordBatch(metaclass=abc.ABCMeta): - __metaclass__: Any = ... - @abc.abstractmethod - def __iter__(self) -> Any: ... - -class ABCRecords(metaclass=abc.ABCMeta): - __metaclass__: Any = ... - @abc.abstractmethod - def __init__(self, buffer: Any) -> None: ... - @abc.abstractmethod - def size_in_bytes(self) -> Any: ... - @abc.abstractmethod - def next_batch(self) -> Any: ... - @abc.abstractmethod - def has_next(self) -> Any: ... 
diff --git a/stubs/kafka/record/default_records.pyi b/stubs/kafka/record/default_records.pyi deleted file mode 100644 index 5431f888..00000000 --- a/stubs/kafka/record/default_records.pyi +++ /dev/null @@ -1,92 +0,0 @@ -from kafka.codec import gzip_decode as gzip_decode, gzip_encode as gzip_encode, lz4_decode as lz4_decode, lz4_encode as lz4_encode, snappy_decode as snappy_decode, snappy_encode as snappy_encode -from kafka.errors import CorruptRecordException as CorruptRecordException, UnsupportedCodecError as UnsupportedCodecError -from kafka.record.abc import ABCRecord as ABCRecord, ABCRecordBatch as ABCRecordBatch, ABCRecordBatchBuilder as ABCRecordBatchBuilder -from kafka.record.util import calc_crc32c as calc_crc32c, decode_varint as decode_varint, encode_varint as encode_varint, size_of_varint as size_of_varint -from typing import Any, Optional - -class DefaultRecordBase: - HEADER_STRUCT: Any = ... - ATTRIBUTES_OFFSET: Any = ... - CRC_OFFSET: Any = ... - AFTER_LEN_OFFSET: Any = ... - CODEC_MASK: int = ... - CODEC_NONE: int = ... - CODEC_GZIP: int = ... - CODEC_SNAPPY: int = ... - CODEC_LZ4: int = ... - CODEC_ZSTD: int = ... - TIMESTAMP_TYPE_MASK: int = ... - TRANSACTIONAL_MASK: int = ... - CONTROL_MASK: int = ... - LOG_APPEND_TIME: int = ... - CREATE_TIME: int = ... - -class DefaultRecordBatch(DefaultRecordBase, ABCRecordBatch): - def __init__(self, buffer: Any) -> None: ... - @property - def base_offset(self): ... - @property - def magic(self): ... - @property - def crc(self): ... - @property - def attributes(self): ... - @property - def last_offset_delta(self): ... - @property - def compression_type(self): ... - @property - def timestamp_type(self): ... - @property - def is_transactional(self): ... - @property - def is_control_batch(self): ... - @property - def first_timestamp(self): ... - @property - def max_timestamp(self): ... - def __iter__(self) -> Any: ... - def __next__(self): ... - next: Any = ... - def validate_crc(self): ... - -class DefaultRecord(ABCRecord): - def __init__(self, offset: Any, timestamp: Any, timestamp_type: Any, key: Any, value: Any, headers: Any) -> None: ... - @property - def offset(self): ... - @property - def timestamp(self): ... - @property - def timestamp_type(self): ... - @property - def key(self): ... - @property - def value(self): ... - @property - def headers(self): ... - @property - def checksum(self) -> None: ... - -class DefaultRecordBatchBuilder(DefaultRecordBase, ABCRecordBatchBuilder): - MAX_RECORD_OVERHEAD: int = ... - def __init__(self, magic: Any, compression_type: Any, is_transactional: Any, producer_id: Any, producer_epoch: Any, base_sequence: Any, batch_size: Any) -> None: ... - def append(self, offset: Any, timestamp: Any, key: Any, value: Any, headers: Optional[Any] = ...): ... - def write_header(self, use_compression_type: bool = ...) -> None: ... - def build(self): ... - def size(self): ... - def size_in_bytes(self, offset: Any, timestamp: Any, key: Any, value: Any, headers: Any): ... - @classmethod - def size_of(cls, key: Any, value: Any, headers: Any): ... - @classmethod - def estimate_size_in_bytes(cls, key: Any, value: Any, headers: Any): ... - -class DefaultRecordMetadata: - def __init__(self, offset: Any, size: Any, timestamp: Any) -> None: ... - @property - def offset(self): ... - @property - def crc(self) -> None: ... - @property - def size(self): ... - @property - def timestamp(self): ... 
diff --git a/stubs/kafka/record/legacy_records.pyi b/stubs/kafka/record/legacy_records.pyi deleted file mode 100644 index 9533fd63..00000000 --- a/stubs/kafka/record/legacy_records.pyi +++ /dev/null @@ -1,77 +0,0 @@ -from kafka.codec import gzip_decode as gzip_decode, gzip_encode as gzip_encode, lz4_decode as lz4_decode, lz4_decode_old_kafka as lz4_decode_old_kafka, lz4_encode as lz4_encode, lz4_encode_old_kafka as lz4_encode_old_kafka, snappy_decode as snappy_decode, snappy_encode as snappy_encode -from kafka.errors import CorruptRecordException as CorruptRecordException, UnsupportedCodecError as UnsupportedCodecError -from kafka.record.abc import ABCRecord as ABCRecord, ABCRecordBatch as ABCRecordBatch, ABCRecordBatchBuilder as ABCRecordBatchBuilder -from kafka.record.util import calc_crc32 as calc_crc32 -from typing import Any, Optional - -class LegacyRecordBase: - HEADER_STRUCT_V0: Any = ... - HEADER_STRUCT_V1: Any = ... - LOG_OVERHEAD: Any = ... - CRC_OFFSET: Any = ... - MAGIC_OFFSET: Any = ... - RECORD_OVERHEAD_V0: Any = ... - RECORD_OVERHEAD_V1: Any = ... - KEY_OFFSET_V0: Any = ... - KEY_OFFSET_V1: Any = ... - KEY_LENGTH: Any = ... - VALUE_LENGTH: Any = ... - CODEC_MASK: int = ... - CODEC_NONE: int = ... - CODEC_GZIP: int = ... - CODEC_SNAPPY: int = ... - CODEC_LZ4: int = ... - TIMESTAMP_TYPE_MASK: int = ... - LOG_APPEND_TIME: int = ... - CREATE_TIME: int = ... - NO_TIMESTAMP: int = ... - -class LegacyRecordBatch(ABCRecordBatch, LegacyRecordBase): - def __init__(self, buffer: Any, magic: Any) -> None: ... - @property - def timestamp_type(self): ... - @property - def compression_type(self): ... - def validate_crc(self): ... - def __iter__(self) -> Any: ... - -class LegacyRecord(ABCRecord): - def __init__(self, offset: Any, timestamp: Any, timestamp_type: Any, key: Any, value: Any, crc: Any) -> None: ... - @property - def offset(self): ... - @property - def timestamp(self): ... - @property - def timestamp_type(self): ... - @property - def key(self): ... - @property - def value(self): ... - @property - def headers(self): ... - @property - def checksum(self): ... - -class LegacyRecordBatchBuilder(ABCRecordBatchBuilder, LegacyRecordBase): - def __init__(self, magic: Any, compression_type: Any, batch_size: Any) -> None: ... - def append(self, offset: Any, timestamp: Any, key: Any, value: Any, headers: Optional[Any] = ...): ... - def build(self): ... - def size(self): ... - def size_in_bytes(self, offset: Any, timestamp: Any, key: Any, value: Any, headers: Optional[Any] = ...): ... - @classmethod - def record_size(cls, magic: Any, key: Any, value: Any): ... - @classmethod - def record_overhead(cls, magic: Any): ... - @classmethod - def estimate_size_in_bytes(cls, magic: Any, compression_type: Any, key: Any, value: Any): ... - -class LegacyRecordMetadata: - def __init__(self, offset: Any, crc: Any, size: Any, timestamp: Any) -> None: ... - @property - def offset(self): ... - @property - def crc(self): ... - @property - def size(self): ... - @property - def timestamp(self): ... 
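Note: the record stubs being dropped here only mirrored kafka-python's batch reader/builder interfaces; aiokafka keeps shipping its own implementations under aiokafka.record, which the test changes later in this diff still import. As a rough sketch of the builder interface these stubs describe, with purely illustrative values rather than code from this change:

    from aiokafka.record.legacy_records import LegacyRecordBatchBuilder

    # Build a tiny v1 (magic=1) message-set batch in memory.
    builder = LegacyRecordBatchBuilder(magic=1, compression_type=0, batch_size=1024)
    builder.append(0, timestamp=None, key=None, value=b"payload")
    raw = bytes(builder.build())  # wire-format bytes, usable as a produce payload

The same construct/append/build flow applies to the v2 DefaultRecordBatchBuilder deleted above, which additionally takes the transactional and producer-id fields and per-record headers listed in that stub.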
diff --git a/stubs/kafka/record/memory_records.pyi b/stubs/kafka/record/memory_records.pyi
deleted file mode 100644
index 29e8984a..00000000
--- a/stubs/kafka/record/memory_records.pyi
+++ /dev/null
@@ -1,26 +0,0 @@
-from kafka.errors import CorruptRecordException as CorruptRecordException
-from kafka.record.abc import ABCRecords as ABCRecords
-from kafka.record.default_records import DefaultRecordBatch as DefaultRecordBatch, DefaultRecordBatchBuilder as DefaultRecordBatchBuilder
-from kafka.record.legacy_records import LegacyRecordBatch as LegacyRecordBatch, LegacyRecordBatchBuilder as LegacyRecordBatchBuilder
-from typing import Any
-
-class MemoryRecords(ABCRecords):
-    LENGTH_OFFSET: Any = ...
-    LOG_OVERHEAD: Any = ...
-    MAGIC_OFFSET: Any = ...
-    MIN_SLICE: Any = ...
-    def __init__(self, bytes_data: Any) -> None: ...
-    def size_in_bytes(self): ...
-    def valid_bytes(self): ...
-    def has_next(self): ...
-    def next_batch(self, _min_slice: Any = ..., _magic_offset: Any = ...): ...
-
-class MemoryRecordsBuilder:
-    def __init__(self, magic: Any, compression_type: Any, batch_size: Any) -> None: ...
-    def append(self, timestamp: Any, key: Any, value: Any, headers: Any = ...): ...
-    def close(self) -> None: ...
-    def size_in_bytes(self): ...
-    def compression_rate(self): ...
-    def is_full(self): ...
-    def next_offset(self): ...
-    def buffer(self): ...
diff --git a/stubs/kafka/record/util.pyi b/stubs/kafka/record/util.pyi
deleted file mode 100644
index c0cb3f0f..00000000
--- a/stubs/kafka/record/util.pyi
+++ /dev/null
@@ -1,7 +0,0 @@
-from typing import Any
-
-def encode_varint(value: Any, write: Any): ...
-def size_of_varint(value: Any): ...
-def decode_varint(buffer: Any, pos: int = ...): ...
-def calc_crc32c(memview: Any, _crc32c: Any = ...): ...
-def calc_crc32(memview: Any): ...
diff --git a/stubs/kafka/scram.pyi b/stubs/kafka/scram.pyi
deleted file mode 100644
index 8986595a..00000000
--- a/stubs/kafka/scram.pyi
+++ /dev/null
@@ -1,26 +0,0 @@
-from typing import Any
-
-def xor_bytes(left: Any, right: Any): ...
-
-class ScramClient:
-    MECHANISMS: Any = ...
-    nonce: Any = ...
-    auth_message: str = ...
-    salted_password: Any = ...
-    user: Any = ...
-    password: Any = ...
-    hashfunc: Any = ...
-    hashname: Any = ...
-    stored_key: Any = ...
-    client_key: Any = ...
-    client_signature: Any = ...
-    client_proof: Any = ...
-    server_key: Any = ...
-    server_signature: Any = ...
-    def __init__(self, user: Any, password: Any, mechanism: Any) -> None: ...
-    def first_message(self): ...
-    def process_server_first_message(self, server_first_message: Any) -> None: ...
-    def hmac(self, key: Any, msg: Any): ...
-    def create_salted_password(self, salt: Any, iterations: Any) -> None: ...
-    def final_message(self): ...
-    def process_server_final_message(self, server_final_message: Any) -> None: ...
diff --git a/stubs/kafka/serializer/__init__.pyi b/stubs/kafka/serializer/__init__.pyi
deleted file mode 100644
index b9caa9a2..00000000
--- a/stubs/kafka/serializer/__init__.pyi
+++ /dev/null
@@ -1 +0,0 @@
-from kafka.serializer.abstract import Deserializer as Deserializer, Serializer as Serializer
diff --git a/stubs/kafka/serializer/abstract.pyi b/stubs/kafka/serializer/abstract.pyi
deleted file mode 100644
index 8d76e616..00000000
--- a/stubs/kafka/serializer/abstract.pyi
+++ /dev/null
@@ -1,16 +0,0 @@
-import abc
-from typing import Any
-
-class Serializer(metaclass=abc.ABCMeta):
-    __meta__: Any = ...
-    def __init__(self, **config: Any) -> None: ...
-    @abc.abstractmethod
-    def serialize(self, topic: Any, value: Any) -> Any: ...
-    def close(self) -> None: ...
-
-class Deserializer(metaclass=abc.ABCMeta):
-    __meta__: Any = ...
-    def __init__(self, **config: Any) -> None: ...
-    @abc.abstractmethod
-    def deserialize(self, topic: Any, bytes_: Any) -> Any: ...
-    def close(self) -> None: ...
diff --git a/stubs/kafka/structs.pyi b/stubs/kafka/structs.pyi
deleted file mode 100644
index 33aff419..00000000
--- a/stubs/kafka/structs.pyi
+++ /dev/null
@@ -1,37 +0,0 @@
-from collections import namedtuple
-from typing import NamedTuple, Optional, Sequence
-
-
-class TopicPartition(NamedTuple):
-    topic: str
-    partition: int
-
-
-class BrokerMetadata(NamedTuple):
-    nodeId: int
-    host: str
-    port: int
-    rack: str
-
-
-class PartitionMetadata(NamedTuple):
-    topic: str
-    partition: int
-    leader: int
-    replicas: Sequence[int]
-    isr: Sequence[int]
-    error: int
-
-
-class OffsetAndMetadata(NamedTuple):
-    offset: int
-    metadata: str
-
-
-class OffsetAndTimestamp(NamedTuple):
-    offset: int
-    timestamp: Optional[int]  # Only None if used with old broker version
-
-
-# Deprecated part
-RetryOptions = namedtuple('RetryOptions', ['limit', 'backoff_ms', 'retry_on_timeouts'])
diff --git a/stubs/kafka/util.pyi b/stubs/kafka/util.pyi
deleted file mode 100644
index 7eca44e4..00000000
--- a/stubs/kafka/util.pyi
+++ /dev/null
@@ -1,3 +0,0 @@
-
-def crc32(data: bytes) -> int:
-    ...
diff --git a/stubs/kafka/version.pyi b/stubs/kafka/version.pyi
deleted file mode 100644
index bda5b5a7..00000000
--- a/stubs/kafka/version.pyi
+++ /dev/null
@@ -1 +0,0 @@
-__version__: str
diff --git a/tests/_testutil.py b/tests/_testutil.py
index 7aac6ec6..5a4b5f02 100644
--- a/tests/_testutil.py
+++ b/tests/_testutil.py
@@ -422,10 +422,14 @@ def random_string(length):
 
 
 def wait_kafka(kafka_host, kafka_port, timeout=60):
-    loop = asyncio.get_event_loop()
-    return loop.run_until_complete(
-        _wait_kafka(kafka_host, kafka_port, timeout)
-    )
+    loop = asyncio.new_event_loop()
+    try:
+        res = loop.run_until_complete(
+            _wait_kafka(kafka_host, kafka_port, timeout)
+        )
+    finally:
+        loop.close()
+    return res
 
 
 async def _wait_kafka(kafka_host, kafka_port, timeout):
diff --git a/tests/conftest.py b/tests/conftest.py
index 0d6083e2..1a250007 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -50,9 +50,12 @@ def pytest_configure(config):
 @pytest.fixture(scope='session')
 def docker(request):
     image = request.config.getoption('--docker-image')
-    if not image:
-        return None
-    return libdocker.from_env()
+    if image:
+        client = libdocker.from_env()
+        yield client
+        client.close()
+    else:
+        yield None
 
 
 @pytest.fixture(scope='class')
@@ -231,7 +234,7 @@ def kafka_server(request, docker, docker_ip_address, kafka_sasl_ssl_port,
                 container
             )
         finally:
-            container.remove(force=True)
+            container.stop()
 
     else:
diff --git a/tests/test_client.py b/tests/test_client.py
index 332b9916..3fbec2ee 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -17,7 +17,9 @@
 from aiokafka.client import AIOKafkaClient, ConnectionGroup, CoordinationType
 from aiokafka.conn import AIOKafkaConnection, CloseReason
 from aiokafka.util import create_task, get_running_loop
-from ._testutil import KafkaIntegrationTestCase, run_until_complete
+from ._testutil import (
+    KafkaIntegrationTestCase, run_until_complete, kafka_versions
+)
 
 
 NO_ERROR = 0
@@ -288,6 +290,7 @@ async def test_send_request(self):
         self.assertTrue(isinstance(resp, MetadataResponse))
         await client.close()
 
+    @kafka_versions('<2.6')  # FIXME Not implemented yet
     @run_until_complete
     async def test_check_version(self):
         kafka_version = tuple(int(x) for x in self.kafka_version.split("."))
@@ -297,11 +300,6 @@ async def test_check_version(self):
         ver = await client.check_version()
         expected_version = kafka_version[:2]
-        # No significant protocol changed, no way to differencieate
-        if expected_version == (2, 2):
-            expected_version = (2, 1)
-        elif expected_version == (2, 4):
-            expected_version = (2, 3)
         self.assertEqual(expected_version, ver[:2])
         await self.wait_topic(client, 'some_test_topic')
         ver2 = await client.check_version()
 
diff --git a/tests/test_conn.py b/tests/test_conn.py
index afd3080d..3862c11c 100644
--- a/tests/test_conn.py
+++ b/tests/test_conn.py
@@ -15,6 +15,7 @@
     SaslHandShakeRequest, SaslHandShakeResponse,
     SaslAuthenticateRequest, SaslAuthenticateResponse
 )
+from kafka.protocol.produce import ProduceRequest_v0 as ProduceRequest
 
 from aiokafka.conn import AIOKafkaConnection, create_conn, VersionInfo
 from aiokafka.errors import (
@@ -23,7 +24,6 @@
 )
 from aiokafka.record.legacy_records import LegacyRecordBatchBuilder
 from ._testutil import KafkaIntegrationTestCase, run_until_complete
-from aiokafka.protocol.produce import ProduceRequest_v0 as ProduceRequest
 from aiokafka.util import get_running_loop
 
 
diff --git a/tests/test_consumer.py b/tests/test_consumer.py
index f37f0578..5a68c299 100644
--- a/tests/test_consumer.py
+++ b/tests/test_consumer.py
@@ -130,6 +130,7 @@ def test_create_consumer_no_running_loop(self):
             loop.run_until_complete(consumer.getone())
         finally:
             loop.run_until_complete(consumer.stop())
+            loop.close()
 
     @run_until_complete
     async def test_consumer_context_manager(self):
@@ -1290,6 +1291,7 @@ async def test_rebalance_listener_with_coroutines(self):
         await self.send_messages(1, list(range(10, 20)))
 
         main_self = self
+        faults = []
 
         class SimpleRebalanceListener(ConsumerRebalanceListener):
             def __init__(self, consumer):
@@ -1305,16 +1307,24 @@ async def on_partitions_revoked(self, revoked):
                 # Confirm that coordinator is actually waiting for callback to
                 # complete
                 await asyncio.sleep(0.2)
-                main_self.assertTrue(
-                    self.consumer._coordinator.needs_join_prepare)
+                try:
+                    main_self.assertTrue(
+                        self.consumer._coordinator._rejoin_needed_fut.done())
+                except Exception as exc:
+                    # Exceptions here are intercepted by GroupCoordinator
+                    faults.append(exc)
 
             async def on_partitions_assigned(self, assigned):
                 self.assign_mock(assigned)
                 # Confirm that coordinator is actually waiting for callback to
                 # complete
                 await asyncio.sleep(0.2)
-                main_self.assertFalse(
-                    self.consumer._coordinator.needs_join_prepare)
+                try:
+                    main_self.assertFalse(
+                        self.consumer._coordinator._rejoin_needed_fut.done())
+                except Exception as exc:
+                    # Exceptions here are intercepted by GroupCoordinator
+                    faults.append(exc)
 
         tp0 = TopicPartition(self.topic, 0)
         tp1 = TopicPartition(self.topic, 1)
@@ -1333,6 +1343,8 @@ async def on_partitions_assigned(self, assigned):
         self.assertEqual(msg.value, b"10")
         listener1.revoke_mock.assert_called_with(set())
         listener1.assign_mock.assert_called_with({tp0, tp1})
+        if faults:
+            raise faults[0]
 
         # By adding a 2nd consumer we trigger rebalance
         consumer2 = AIOKafkaConsumer(
@@ -1367,6 +1379,8 @@ async def on_partitions_assigned(self, assigned):
         self.assertEqual(listener2.revoke_mock.call_count, 1)
         listener2.assign_mock.assert_called_with(c2_assignment)
         self.assertEqual(listener2.assign_mock.call_count, 1)
+        if faults:
+            raise faults[0]
 
     @run_until_complete
     async def test_rebalance_listener_no_deadlock_callbacks(self):
diff --git a/tests/test_fetcher.py b/tests/test_fetcher.py
index 74d522f3..9b7368f3 100644
--- a/tests/test_fetcher.py
+++ b/tests/test_fetcher.py
@@ -3,6 +3,9 @@
 import unittest
 from unittest import mock
 
+from kafka.protocol.fetch import (
+    FetchRequest_v0 as FetchRequest, FetchResponse_v0 as FetchResponse
+)
 from kafka.protocol.offset import OffsetResponse
 from aiokafka.record.legacy_records import LegacyRecordBatchBuilder
 from aiokafka.record.default_records import (
@@ -10,8 +13,6 @@
     _DefaultRecordBatchBuilderPy as DefaultRecordBatchBuilder)
 from aiokafka.record.memory_records import MemoryRecords
 
-from aiokafka.protocol.fetch import (
-    FetchRequest_v0 as FetchRequest, FetchResponse_v0 as FetchResponse)
 from aiokafka.errors import (
     TopicAuthorizationFailedError, UnknownError, UnknownTopicOrPartitionError,
     OffsetOutOfRangeError, KafkaTimeoutError, NotLeaderForPartitionError
diff --git a/tests/test_producer.py b/tests/test_producer.py
index 5a890259..b0c6ff90 100644
--- a/tests/test_producer.py
+++ b/tests/test_producer.py
@@ -7,12 +7,12 @@
 from unittest import mock
 
 from kafka.cluster import ClusterMetadata
+from kafka.protocol.produce import ProduceResponse
 
 from ._testutil import (
     KafkaIntegrationTestCase, run_until_complete, run_in_thread,
     kafka_versions
 )
-from aiokafka.protocol.produce import ProduceResponse
 from aiokafka.producer import AIOKafkaProducer
 from aiokafka.client import AIOKafkaClient
 from aiokafka.consumer import AIOKafkaConsumer
@@ -148,6 +148,7 @@ def test_create_producer_no_running_loop(self):
             self.assertEqual(resp.offset, 0)
         finally:
             loop.run_until_complete(producer.stop())
+            loop.close()
 
     @run_until_complete
     async def test_producer_context_manager(self):
diff --git a/tests/test_sender.py b/tests/test_sender.py
index d1f57dbf..01fdb1d8 100644
--- a/tests/test_sender.py
+++ b/tests/test_sender.py
@@ -4,6 +4,8 @@
     KafkaIntegrationTestCase, run_until_complete, kafka_versions
 )
 
+from kafka.protocol.produce import ProduceRequest, ProduceResponse
+
 from aiokafka.producer.sender import (
     Sender, InitPIDHandler, AddPartitionsToTxnHandler,
     AddOffsetsToTxnHandler, TxnOffsetCommitHandler, EndTxnHandler,
@@ -19,9 +21,6 @@
     TxnOffsetCommitRequest, TxnOffsetCommitResponse,
     EndTxnRequest, EndTxnResponse
 )
-from aiokafka.protocol.produce import (
-    ProduceRequest, ProduceResponse
-)
 from aiokafka.producer.message_accumulator import MessageAccumulator
 from aiokafka.client import AIOKafkaClient, CoordinationType, ConnectionGroup
 from aiokafka.structs import TopicPartition, OffsetAndMetadata
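Note: beyond the import shuffle from aiokafka.protocol to kafka.protocol, the test changes above share one pattern: synchronous helpers and tests stop borrowing the ambient event loop, create a private one, and always close it (wait_kafka() in _testutil.py, plus the added loop.close() calls in the producer and consumer tests). A minimal sketch of that pattern, with illustrative names rather than code from this change:

    import asyncio

    async def _probe():
        # Stand-in for an async readiness check such as _wait_kafka().
        await asyncio.sleep(0)
        return True

    def wait_ready(timeout=60):
        # A private loop avoids depending on (or leaking into) the global
        # loop that asyncio.get_event_loop() would create or reuse.
        loop = asyncio.new_event_loop()
        try:
            return loop.run_until_complete(asyncio.wait_for(_probe(), timeout))
        finally:
            loop.close()

Closing the loop in a finally block keeps repeated calls from leaking file descriptors even when the awaited check times out or fails.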