Safe Haskell | None |
---|---|
Language | Haskell2010 |
Kafka.Consumer
Description
Module to consume messages from Kafka topics.
Here's an example of code to consume messages from a topic:
```haskell
import Control.Exception (bracket)
import Control.Monad (replicateM_)
import Kafka.Consumer

-- Global consumer properties
consumerProps :: ConsumerProperties
consumerProps = brokersList ["localhost:9092"]
             <> groupId (ConsumerGroupId "consumer_example_group")
             <> noAutoCommit
             <> logLevel KafkaLogInfo

-- Subscription to topics
consumerSub :: Subscription
consumerSub = topics [TopicName "kafka-client-example-topic"]
           <> offsetReset Earliest

-- Running an example
runConsumerExample :: IO ()
runConsumerExample = do
    res <- bracket mkConsumer clConsumer runHandler
    print res
    where
      mkConsumer = newConsumer consumerProps consumerSub
      clConsumer (Left err) = pure (Left err)
      clConsumer (Right kc) = (maybe (Right ()) Left) <$> closeConsumer kc
      runHandler (Left err) = pure (Left err)
      runHandler (Right kc) = processMessages kc

-- Example polling 10 times before stopping
processMessages :: KafkaConsumer -> IO (Either KafkaError ())
processMessages kafka = do
    replicateM_ 10 $ do
      msg <- pollMessage kafka (Timeout 1000)
      putStrLn $ "Message: " <> show msg
      err <- commitAllOffsets OffsetCommit kafka
      putStrLn $ "Offsets: " <> maybe "Committed." show err
    pure $ Right ()
```
Synopsis
- data KafkaConsumer
- module Kafka.Consumer.ConsumerProperties
- module Kafka.Consumer.Subscription
- module Kafka.Types
- newtype ConsumerGroupId = ConsumerGroupId {}
- data OffsetReset
- newtype Offset = Offset {}
- data RebalanceEvent
- data PartitionOffset
- data SubscribedPartitions
- data Timestamp
- data OffsetCommit
- data OffsetStoreSync
- data OffsetStoreMethod
- data TopicPartition = TopicPartition {}
- data ConsumerRecord k v = ConsumerRecord {
- crTopic :: !TopicName
- crPartition :: !PartitionId
- crOffset :: !Offset
- crTimestamp :: !Timestamp
- crHeaders :: !Headers
- crKey :: !k
- crValue :: !v
- crMapKey :: (k -> k') -> ConsumerRecord k v -> ConsumerRecord k' v
- crMapValue :: (v -> v') -> ConsumerRecord k v -> ConsumerRecord k v'
- crMapKV :: (k -> k') -> (v -> v') -> ConsumerRecord k v -> ConsumerRecord k' v'
- sequenceFirst :: (Bitraversable t, Applicative f) => t (f k) v -> f (t k v)
- traverseFirst :: (Bitraversable t, Applicative f) => (k -> f k') -> t k v -> f (t k' v)
- traverseFirstM :: (Bitraversable t, Applicative f, Monad m) => (k -> m (f k')) -> t k v -> m (f (t k' v))
- traverseM :: (Traversable t, Applicative f, Monad m) => (v -> m (f v')) -> t v -> m (f (t v'))
- bitraverseM :: (Bitraversable t, Applicative f, Monad m) => (k -> m (f k')) -> (v -> m (f v')) -> t k v -> m (f (t k' v'))
- kcKafkaPtr :: KafkaConsumer -> Kafka
- kcKafkaConf :: KafkaConsumer -> KafkaConf
- runConsumer :: ConsumerProperties -> Subscription -> (KafkaConsumer -> IO (Either KafkaError a)) -> IO (Either KafkaError a)
- newConsumer :: MonadIO m => ConsumerProperties -> Subscription -> m (Either KafkaError KafkaConsumer)
- assign :: MonadIO m => KafkaConsumer -> [TopicPartition] -> m (Maybe KafkaError)
- assignment :: MonadIO m => KafkaConsumer -> m (Either KafkaError (Map TopicName [PartitionId]))
- subscription :: MonadIO m => KafkaConsumer -> m (Either KafkaError [(TopicName, SubscribedPartitions)])
- pausePartitions :: MonadIO m => KafkaConsumer -> [(TopicName, PartitionId)] -> m KafkaError
- resumePartitions :: MonadIO m => KafkaConsumer -> [(TopicName, PartitionId)] -> m KafkaError
- committed :: MonadIO m => KafkaConsumer -> Timeout -> [(TopicName, PartitionId)] -> m (Either KafkaError [TopicPartition])
- position :: MonadIO m => KafkaConsumer -> [(TopicName, PartitionId)] -> m (Either KafkaError [TopicPartition])
- seek :: MonadIO m => KafkaConsumer -> Timeout -> [TopicPartition] -> m (Maybe KafkaError)
- seekPartitions :: MonadIO m => KafkaConsumer -> [TopicPartition] -> Timeout -> m (Maybe KafkaError)
- pollMessage :: MonadIO m => KafkaConsumer -> Timeout -> m (Either KafkaError (ConsumerRecord (Maybe ByteString) (Maybe ByteString)))
- pollConsumerEvents :: KafkaConsumer -> Maybe Timeout -> IO ()
- pollMessageBatch :: MonadIO m => KafkaConsumer -> Timeout -> BatchSize -> m [Either KafkaError (ConsumerRecord (Maybe ByteString) (Maybe ByteString))]
- commitOffsetMessage :: MonadIO m => OffsetCommit -> KafkaConsumer -> ConsumerRecord k v -> m (Maybe KafkaError)
- commitAllOffsets :: MonadIO m => OffsetCommit -> KafkaConsumer -> m (Maybe KafkaError)
- commitPartitionsOffsets :: MonadIO m => OffsetCommit -> KafkaConsumer -> [TopicPartition] -> m (Maybe KafkaError)
- storeOffsets :: MonadIO m => KafkaConsumer -> [TopicPartition] -> m (Maybe KafkaError)
- storeOffsetMessage :: MonadIO m => KafkaConsumer -> ConsumerRecord k v -> m (Maybe KafkaError)
- rewindConsumer :: MonadIO m => KafkaConsumer -> Timeout -> m (Maybe KafkaError)
- closeConsumer :: MonadIO m => KafkaConsumer -> m (Maybe KafkaError)
- data RdKafkaRespErrT
- = RdKafkaRespErrBegin
- | RdKafkaRespErrBadMsg
- | RdKafkaRespErrBadCompression
- | RdKafkaRespErrDestroy
- | RdKafkaRespErrFail
- | RdKafkaRespErrTransport
- | RdKafkaRespErrCritSysResource
- | RdKafkaRespErrResolve
- | RdKafkaRespErrMsgTimedOut
- | RdKafkaRespErrPartitionEof
- | RdKafkaRespErrUnknownPartition
- | RdKafkaRespErrFs
- | RdKafkaRespErrUnknownTopic
- | RdKafkaRespErrAllBrokersDown
- | RdKafkaRespErrInvalidArg
- | RdKafkaRespErrTimedOut
- | RdKafkaRespErrQueueFull
- | RdKafkaRespErrIsrInsuff
- | RdKafkaRespErrNodeUpdate
- | RdKafkaRespErrSsl
- | RdKafkaRespErrWaitCoord
- | RdKafkaRespErrUnknownGroup
- | RdKafkaRespErrInProgress
- | RdKafkaRespErrPrevInProgress
- | RdKafkaRespErrExistingSubscription
- | RdKafkaRespErrAssignPartitions
- | RdKafkaRespErrRevokePartitions
- | RdKafkaRespErrConflict
- | RdKafkaRespErrState
- | RdKafkaRespErrUnknownProtocol
- | RdKafkaRespErrNotImplemented
- | RdKafkaRespErrAuthentication
- | RdKafkaRespErrNoOffset
- | RdKafkaRespErrOutdated
- | RdKafkaRespErrTimedOutQueue
- | RdKafkaRespErrUnsupportedFeature
- | RdKafkaRespErrWaitCache
- | RdKafkaRespErrIntr
- | RdKafkaRespErrKeySerialization
- | RdKafkaRespErrValueSerialization
- | RdKafkaRespErrKeyDeserialization
- | RdKafkaRespErrValueDeserialization
- | RdKafkaRespErrPartial
- | RdKafkaRespErrReadOnly
- | RdKafkaRespErrNoent
- | RdKafkaRespErrUnderflow
- | RdKafkaRespErrInvalidType
- | RdKafkaRespErrRetry
- | RdKafkaRespErrPurgeQueue
- | RdKafkaRespErrPurgeInflight
- | RdKafkaRespErrFatal
- | RdKafkaRespErrInconsistent
- | RdKafkaRespErrGaplessGuarantee
- | RdKafkaRespErrMaxPollExceeded
- | RdKafkaRespErrUnknownBroker
- | RdKafkaRespErrNotConfigured
- | RdKafkaRespErrFenced
- | RdKafkaRespErrApplication
- | RdKafkaRespErrAssignmentLost
- | RdKafkaRespErrNoop
- | RdKafkaRespErrAutoOffsetReset
- | RdKafkaRespErrLogTruncation
- | RdKafkaRespErrEnd
- | RdKafkaRespErrUnknown
- | RdKafkaRespErrNoError
- | RdKafkaRespErrOffsetOutOfRange
- | RdKafkaRespErrInvalidMsg
- | RdKafkaRespErrUnknownTopicOrPart
- | RdKafkaRespErrInvalidMsgSize
- | RdKafkaRespErrLeaderNotAvailable
- | RdKafkaRespErrNotLeaderForPartition
- | RdKafkaRespErrRequestTimedOut
- | RdKafkaRespErrBrokerNotAvailable
- | RdKafkaRespErrReplicaNotAvailable
- | RdKafkaRespErrMsgSizeTooLarge
- | RdKafkaRespErrStaleCtrlEpoch
- | RdKafkaRespErrOffsetMetadataTooLarge
- | RdKafkaRespErrNetworkException
- | RdKafkaRespErrCoordinatorLoadInProgress
- | RdKafkaRespErrCoordinatorNotAvailable
- | RdKafkaRespErrNotCoordinator
- | RdKafkaRespErrTopicException
- | RdKafkaRespErrRecordListTooLarge
- | RdKafkaRespErrNotEnoughReplicas
- | RdKafkaRespErrNotEnoughReplicasAfterAppend
- | RdKafkaRespErrInvalidRequiredAcks
- | RdKafkaRespErrIllegalGeneration
- | RdKafkaRespErrInconsistentGroupProtocol
- | RdKafkaRespErrInvalidGroupId
- | RdKafkaRespErrUnknownMemberId
- | RdKafkaRespErrInvalidSessionTimeout
- | RdKafkaRespErrRebalanceInProgress
- | RdKafkaRespErrInvalidCommitOffsetSize
- | RdKafkaRespErrTopicAuthorizationFailed
- | RdKafkaRespErrGroupAuthorizationFailed
- | RdKafkaRespErrClusterAuthorizationFailed
- | RdKafkaRespErrInvalidTimestamp
- | RdKafkaRespErrUnsupportedSaslMechanism
- | RdKafkaRespErrIllegalSaslState
- | RdKafkaRespErrUnsupportedVersion
- | RdKafkaRespErrTopicAlreadyExists
- | RdKafkaRespErrInvalidPartitions
- | RdKafkaRespErrInvalidReplicationFactor
- | RdKafkaRespErrInvalidReplicaAssignment
- | RdKafkaRespErrInvalidConfig
- | RdKafkaRespErrNotController
- | RdKafkaRespErrInvalidRequest
- | RdKafkaRespErrUnsupportedForMessageFormat
- | RdKafkaRespErrPolicyViolation
- | RdKafkaRespErrOutOfOrderSequenceNumber
- | RdKafkaRespErrDuplicateSequenceNumber
- | RdKafkaRespErrInvalidProducerEpoch
- | RdKafkaRespErrInvalidTxnState
- | RdKafkaRespErrInvalidProducerIdMapping
- | RdKafkaRespErrInvalidTransactionTimeout
- | RdKafkaRespErrConcurrentTransactions
- | RdKafkaRespErrTransactionCoordinatorFenced
- | RdKafkaRespErrTransactionalIdAuthorizationFailed
- | RdKafkaRespErrSecurityDisabled
- | RdKafkaRespErrOperationNotAttempted
- | RdKafkaRespErrKafkaStorageError
- | RdKafkaRespErrLogDirNotFound
- | RdKafkaRespErrSaslAuthenticationFailed
- | RdKafkaRespErrUnknownProducerId
- | RdKafkaRespErrReassignmentInProgress
- | RdKafkaRespErrDelegationTokenAuthDisabled
- | RdKafkaRespErrDelegationTokenNotFound
- | RdKafkaRespErrDelegationTokenOwnerMismatch
- | RdKafkaRespErrDelegationTokenRequestNotAllowed
- | RdKafkaRespErrDelegationTokenAuthorizationFailed
- | RdKafkaRespErrDelegationTokenExpired
- | RdKafkaRespErrInvalidPrincipalType
- | RdKafkaRespErrNonEmptyGroup
- | RdKafkaRespErrGroupIdNotFound
- | RdKafkaRespErrFetchSessionIdNotFound
- | RdKafkaRespErrInvalidFetchSessionEpoch
- | RdKafkaRespErrListenerNotFound
- | RdKafkaRespErrTopicDeletionDisabled
- | RdKafkaRespErrFencedLeaderEpoch
- | RdKafkaRespErrUnknownLeaderEpoch
- | RdKafkaRespErrUnsupportedCompressionType
- | RdKafkaRespErrStaleBrokerEpoch
- | RdKafkaRespErrOffsetNotAvailable
- | RdKafkaRespErrMemberIdRequired
- | RdKafkaRespErrPreferredLeaderNotAvailable
- | RdKafkaRespErrGroupMaxSizeReached
- | RdKafkaRespErrFencedInstanceId
- | RdKafkaRespErrEligibleLeadersNotAvailable
- | RdKafkaRespErrElectionNotNeeded
- | RdKafkaRespErrNoReassignmentInProgress
- | RdKafkaRespErrGroupSubscribedToTopic
- | RdKafkaRespErrInvalidRecord
- | RdKafkaRespErrUnstableOffsetCommit
- | RdKafkaRespErrThrottlingQuotaExceeded
- | RdKafkaRespErrProducerFenced
- | RdKafkaRespErrResourceNotFound
- | RdKafkaRespErrDuplicateResource
- | RdKafkaRespErrUnacceptableCredential
- | RdKafkaRespErrInconsistentVoterSet
- | RdKafkaRespErrInvalidUpdateVersion
- | RdKafkaRespErrFeatureUpdateFailed
- | RdKafkaRespErrPrincipalDeserializationFailure
- | RdKafkaRespErrEndAll
Documentation
data KafkaConsumer Source #
The main type for Kafka consumption, used e.g. to poll and commit messages.
Its constructor is intentionally not exposed; instead, use newConsumer to acquire such a value.
module Kafka.Consumer.ConsumerProperties

module Kafka.Consumer.Subscription
module Kafka.Types
newtype ConsumerGroupId Source #
Consumer group ID. Different consumers with the same consumer group ID will get assigned different partitions of each subscribed topic.
Constructors

ConsumerGroupId
Instances
data OffsetReset Source #
Where to reset the offset when there is no initial offset in Kafka
Instances

- Generic OffsetReset (defined in Kafka.Consumer.Types)
- Show OffsetReset (defined in Kafka.Consumer.Types)
- Eq OffsetReset (defined in Kafka.Consumer.Types)
- type Rep OffsetReset (defined in Kafka.Consumer.Types)
newtype Offset Source #

A message offset in a partition
data RebalanceEvent Source #
A set of events which happen during the rebalancing process
Constructors
RebalanceBeforeAssign [(TopicName, PartitionId)] | Happens before Kafka Client confirms new assignment |
RebalanceAssign [(TopicName, PartitionId)] | Happens after the new assignment is confirmed |
RebalanceBeforeRevoke [(TopicName, PartitionId)] | Happens before Kafka Client confirms partitions rejection |
RebalanceRevoke [(TopicName, PartitionId)] | Happens after the rejection is confirmed |
Instances

- Generic RebalanceEvent (defined in Kafka.Consumer.Types)
- Show RebalanceEvent (defined in Kafka.Consumer.Types)
- Eq RebalanceEvent (defined in Kafka.Consumer.Types)
- type Rep RebalanceEvent (defined in Kafka.Consumer.Types)
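For illustration, a minimal sketch of reacting to these events. It assumes setCallback (from the re-exported Kafka.Consumer.ConsumerProperties) and rebalanceCallback (assumed to live in Kafka.Consumer.Callbacks, as in recent hw-kafka-client versions); the broker address and group name are illustrative:

```haskell
{-# LANGUAGE OverloadedStrings #-}

import Kafka.Consumer
import Kafka.Consumer.Callbacks (rebalanceCallback)

-- Log every rebalance event as it happens.
logRebalance :: KafkaConsumer -> RebalanceEvent -> IO ()
logRebalance _ e = case e of
  RebalanceBeforeAssign ps -> putStrLn ("About to assign: " <> show ps)
  RebalanceAssign ps       -> putStrLn ("Assigned: "        <> show ps)
  RebalanceBeforeRevoke ps -> putStrLn ("About to revoke: " <> show ps)
  RebalanceRevoke ps       -> putStrLn ("Revoked: "         <> show ps)

-- Attach the callback when building the consumer properties.
propsWithRebalanceLogging :: ConsumerProperties
propsWithRebalanceLogging =
     brokersList ["localhost:9092"]
  <> groupId (ConsumerGroupId "rebalance_example_group")
  <> setCallback (rebalanceCallback logRebalance)
```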
data PartitionOffset Source #
The partition offset
Constructors

PartitionOffsetBeginning
PartitionOffsetEnd
PartitionOffset Int64
PartitionOffsetStored
PartitionOffsetInvalid
Instances

- Generic PartitionOffset (defined in Kafka.Consumer.Types)
- Show PartitionOffset (defined in Kafka.Consumer.Types)
- Eq PartitionOffset (defined in Kafka.Consumer.Types)
- type Rep PartitionOffset (defined in Kafka.Consumer.Types)
data SubscribedPartitions Source #
Partitions subscribed by a consumer
Constructors
SubscribedPartitions [PartitionId] | Subscribe only to those partitions |
SubscribedPartitionsAll | Subscribe to all partitions |
Instances

- Generic SubscribedPartitions (defined in Kafka.Consumer.Types)
- Show SubscribedPartitions (defined in Kafka.Consumer.Types)
- Eq SubscribedPartitions (defined in Kafka.Consumer.Types)
- type Rep SubscribedPartitions (defined in Kafka.Consumer.Types)
data Timestamp Source #

Consumer record timestamp
Constructors
CreateTime !Millis
LogAppendTime !Millis
NoTimestamp
Instances

- Generic Timestamp (defined in Kafka.Consumer.Types)
- Read Timestamp (defined in Kafka.Consumer.Types)
- Show Timestamp (defined in Kafka.Consumer.Types)
- Eq Timestamp (defined in Kafka.Consumer.Types)
- type Rep Timestamp (defined in Kafka.Consumer.Types)
data OffsetCommit Source #
Offsets commit mode
Constructors
OffsetCommit | Forces consumer to block until the broker offsets commit is done |
OffsetCommitAsync | Offsets will be committed in a non-blocking way |
Instances

- Generic OffsetCommit (defined in Kafka.Consumer.Types)
- Show OffsetCommit (defined in Kafka.Consumer.Types)
- Eq OffsetCommit (defined in Kafka.Consumer.Types)
- type Rep OffsetCommit (defined in Kafka.Consumer.Types)
data OffsetStoreSync Source #
Indicates how offsets are to be synced to disk
Constructors
OffsetSyncDisable | Do not sync offsets (in Kafka: -1) |
OffsetSyncImmediate | Sync immediately after each offset commit (in Kafka: 0) |
OffsetSyncInterval Int | Sync after specified interval in millis |
Instances

- Generic OffsetStoreSync (defined in Kafka.Consumer.Types)
- Show OffsetStoreSync (defined in Kafka.Consumer.Types)
- Eq OffsetStoreSync (defined in Kafka.Consumer.Types)
- type Rep OffsetStoreSync (defined in Kafka.Consumer.Types)
data OffsetStoreMethod Source #
Indicates the method of storing the offsets
Constructors
OffsetStoreBroker | Offsets are stored in Kafka broker (preferred) |
OffsetStoreFile FilePath OffsetStoreSync | Offsets are stored in a file (and synced to disk according to the sync policy) |
Instances

- Generic OffsetStoreMethod (defined in Kafka.Consumer.Types)
- Show OffsetStoreMethod (defined in Kafka.Consumer.Types)
- Eq OffsetStoreMethod (defined in Kafka.Consumer.Types)
- type Rep OffsetStoreMethod (defined in Kafka.Consumer.Types)
data TopicPartition Source #
Kafka topic partition structure
Constructors

TopicPartition

Fields:

- tpTopicName :: TopicName
- tpPartition :: PartitionId
- tpOffset :: PartitionOffset
Instances

- Generic TopicPartition (defined in Kafka.Consumer.Types)
- Show TopicPartition (defined in Kafka.Consumer.Types)
- Eq TopicPartition (defined in Kafka.Consumer.Types)
- type Rep TopicPartition (defined in Kafka.Consumer.Types)
data ConsumerRecord k v Source #
Represents a received message from Kafka (i.e. used in a consumer)
Constructors

ConsumerRecord

Fields:

- crTopic :: !TopicName
- crPartition :: !PartitionId
- crOffset :: !Offset
- crTimestamp :: !Timestamp
- crHeaders :: !Headers
- crKey :: !k
- crValue :: !v
Instances

- Bifoldable ConsumerRecord (defined in Kafka.Consumer.Types)
- Bifunctor ConsumerRecord (defined in Kafka.Consumer.Types)
- Bitraversable ConsumerRecord (defined in Kafka.Consumer.Types)
- Functor (ConsumerRecord k) (defined in Kafka.Consumer.Types)
- Foldable (ConsumerRecord k) (defined in Kafka.Consumer.Types)
- Traversable (ConsumerRecord k) (defined in Kafka.Consumer.Types)
- Generic (ConsumerRecord k v) (defined in Kafka.Consumer.Types)
- (Read k, Read v) => Read (ConsumerRecord k v) (defined in Kafka.Consumer.Types)
- (Show k, Show v) => Show (ConsumerRecord k v) (defined in Kafka.Consumer.Types)
- (Eq k, Eq v) => Eq (ConsumerRecord k v) (defined in Kafka.Consumer.Types)
- type Rep (ConsumerRecord k v) (defined in Kafka.Consumer.Types)
crMapKey :: (k -> k') -> ConsumerRecord k v -> ConsumerRecord k' v Source #
Deprecated: Isn't concern of this library. Use first
crMapValue :: (v -> v') -> ConsumerRecord k v -> ConsumerRecord k v' Source #
Deprecated: Isn't concern of this library. Use second
crMapKV :: (k -> k') -> (v -> v') -> ConsumerRecord k v -> ConsumerRecord k' v' Source #
Deprecated: Isn't concern of this library. Use bimap
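As these deprecation notices indicate, the Bifunctor and Bitraversable instances above cover the same ground. A minimal sketch of decoding a polled record's key and value with bimap (the UTF-8 Text decoding is an illustrative choice, not part of this library):

```haskell
import Data.Bifunctor (bimap)
import Data.ByteString (ByteString)
import Data.Text (Text)
import qualified Data.Text.Encoding as T

import Kafka.Consumer (ConsumerRecord)

-- Decode the raw key and value bytes of a record to Text,
-- leaving all the metadata fields untouched.
decodeRecord :: ConsumerRecord (Maybe ByteString) (Maybe ByteString)
             -> ConsumerRecord (Maybe Text) (Maybe Text)
decodeRecord = bimap (fmap T.decodeUtf8) (fmap T.decodeUtf8)
```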
sequenceFirst :: (Bitraversable t, Applicative f) => t (f k) v -> f (t k v) Source #
Deprecated: Isn't concern of this library. Use bitraverse id pure
traverseFirst :: (Bitraversable t, Applicative f) => (k -> f k') -> t k v -> f (t k' v) Source #
Deprecated: Isn't concern of this library. Use bitraverse f pure
traverseFirstM :: (Bitraversable t, Applicative f, Monad m) => (k -> m (f k')) -> t k v -> m (f (t k' v)) Source #
Deprecated: Isn't concern of this library. Use bitraverse id pure <$> bitraverse f pure r
traverseM :: (Traversable t, Applicative f, Monad m) => (v -> m (f v')) -> t v -> m (f (t v')) Source #

Deprecated: Isn't concern of this library. Use sequenceA <$> traverse f r
bitraverseM :: (Bitraversable t, Applicative f, Monad m) => (k -> m (f k')) -> (v -> m (f v')) -> t k v -> m (f (t k' v')) Source #
Deprecated: Isn't concern of this library. Use bisequenceA <$> bimapM f g r
kcKafkaPtr :: KafkaConsumer -> Kafka Source #
kcKafkaConf :: KafkaConsumer -> KafkaConf Source #
runConsumer Source #

Deprecated: Use newConsumer / closeConsumer instead

Arguments

:: ConsumerProperties | |
-> Subscription | |
-> (KafkaConsumer -> IO (Either KafkaError a)) | A callback function to poll and handle messages |
-> IO (Either KafkaError a) | |

Runs a high-level Kafka consumer.
The provided callback is expected to call pollMessage when convenient.
newConsumer :: MonadIO m => ConsumerProperties -> Subscription -> m (Either KafkaError KafkaConsumer) Source #
Create a KafkaConsumer. This consumer must be correctly released using closeConsumer.
assign :: MonadIO m => KafkaConsumer -> [TopicPartition] -> m (Maybe KafkaError) Source #
Assigns the consumer to consume from the given topics, partitions, and offsets.
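A minimal sketch of manual assignment (the topic name and partition are illustrative):

```haskell
{-# LANGUAGE OverloadedStrings #-}

import Kafka.Consumer

-- Assign partition 0 of a topic, starting from the beginning.
assignFromBeginning :: KafkaConsumer -> IO (Maybe KafkaError)
assignFromBeginning kc =
  assign kc [ TopicPartition (TopicName "kafka-client-example-topic")
                             (PartitionId 0)
                             PartitionOffsetBeginning ]
```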
assignment :: MonadIO m => KafkaConsumer -> m (Either KafkaError (Map TopicName [PartitionId])) Source #
Returns the current consumer's assignment.
subscription :: MonadIO m => KafkaConsumer -> m (Either KafkaError [(TopicName, SubscribedPartitions)]) Source #
Returns the current consumer's subscription.
pausePartitions :: MonadIO m => KafkaConsumer -> [(TopicName, PartitionId)] -> m KafkaError Source #
Pauses specified partitions on the current consumer.
resumePartitions :: MonadIO m => KafkaConsumer -> [(TopicName, PartitionId)] -> m KafkaError Source #
Resumes specified partitions on the current consumer.
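A sketch of temporarily pausing one partition around some work, composed from the two functions above:

```haskell
import Kafka.Consumer

-- Pause a partition, run an action, then resume the partition.
-- (For exception safety, consider wrapping this in bracket.)
withPaused :: KafkaConsumer -> (TopicName, PartitionId) -> IO a -> IO a
withPaused kc tp action = do
  _ <- pausePartitions kc [tp]
  r <- action
  _ <- resumePartitions kc [tp]
  pure r
```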
committed :: MonadIO m => KafkaConsumer -> Timeout -> [(TopicName, PartitionId)] -> m (Either KafkaError [TopicPartition]) Source #
Retrieve committed offsets for topics+partitions.
position :: MonadIO m => KafkaConsumer -> [(TopicName, PartitionId)] -> m (Either KafkaError [TopicPartition]) Source #
Retrieve current positions (last consumed message offset+1) for the current running instance of the consumer.
If the current consumer hasn't received any messages for a given partition, PartitionOffsetInvalid is returned.
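Combined with committed, this makes it easy to see how far the consumer has read past what it has committed; a sketch (the timeout is illustrative):

```haskell
import Kafka.Consumer

-- Print committed offsets next to current positions for some partitions.
reportProgress :: KafkaConsumer -> [(TopicName, PartitionId)] -> IO ()
reportProgress kc ps = do
  comm <- committed kc (Timeout 5000) ps
  pos  <- position kc ps
  putStrLn ("Committed: " <> show comm)
  putStrLn ("Position:  " <> show pos)
```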
seek :: MonadIO m => KafkaConsumer -> Timeout -> [TopicPartition] -> m (Maybe KafkaError) Source #
Deprecated: Use seekPartitions instead
Seek a particular offset for each provided TopicPartition
seekPartitions :: MonadIO m => KafkaConsumer -> [TopicPartition] -> Timeout -> m (Maybe KafkaError) Source #
Seeks the consumer to the per-partition offset given in the tpOffset field of each provided TopicPartition.
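A sketch of seeking one partition back to its beginning (the timeout is illustrative):

```haskell
import Kafka.Consumer

-- Seek a single partition back to its first offset, waiting up to 5 seconds.
seekToBeginning :: KafkaConsumer -> TopicName -> PartitionId -> IO (Maybe KafkaError)
seekToBeginning kc t p =
  seekPartitions kc [TopicPartition t p PartitionOffsetBeginning] (Timeout 5000)
```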
pollMessage Source #

Arguments

:: MonadIO m | |
=> KafkaConsumer | |
-> Timeout | The timeout, in milliseconds |
-> m (Either KafkaError (ConsumerRecord (Maybe ByteString) (Maybe ByteString))) | Left on error or timeout, Right on success |

Polls a single message.
pollConsumerEvents :: KafkaConsumer -> Maybe Timeout -> IO () Source #
Polls the provided Kafka consumer for events.
Events cause application-provided callbacks to be called.
The Timeout argument specifies the maximum amount of time (in milliseconds) that the call will block waiting for events.
This function is called on each pollMessage and, if the runtime supports multithreading, it is also called periodically in a separate thread to ensure the callbacks are handled as soon as possible.
There is normally no need to call this function manually, except in special single-threaded setups where polling for events on each pollMessage is not frequent enough.
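For those special cases, a sketch of a dedicated event-polling loop (the 100 ms timeout is illustrative; run this in its own thread):

```haskell
import Control.Monad (forever)

import Kafka.Consumer

-- Poll consumer events in a loop, blocking for at most 100 ms per call.
pollEventsLoop :: KafkaConsumer -> IO ()
pollEventsLoop kc = forever (pollConsumerEvents kc (Just (Timeout 100)))
```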
pollMessageBatch :: MonadIO m => KafkaConsumer -> Timeout -> BatchSize -> m [Either KafkaError (ConsumerRecord (Maybe ByteString) (Maybe ByteString))] Source #
Polls up to BatchSize messages.
Unlike pollMessage, this function does not return the usual "timeout" errors; an empty batch is returned when there are no messages available.
This API is not available when CallbackPollMode is set to CallbackPollModeSync.
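A sketch of batch consumption (the batch size and timeout are illustrative):

```haskell
import Data.ByteString (ByteString)
import Data.Either (rights)

import Kafka.Consumer

-- Poll up to 100 messages, waiting at most 1 second, keeping only successes.
pollBatch :: KafkaConsumer
          -> IO [ConsumerRecord (Maybe ByteString) (Maybe ByteString)]
pollBatch kc = rights <$> pollMessageBatch kc (Timeout 1000) (BatchSize 100)
```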
commitOffsetMessage :: MonadIO m => OffsetCommit -> KafkaConsumer -> ConsumerRecord k v -> m (Maybe KafkaError) Source #
Commit message's offset on broker for the message's partition.
commitAllOffsets :: MonadIO m => OffsetCommit -> KafkaConsumer -> m (Maybe KafkaError) Source #
Commit offsets for all currently assigned partitions.
commitPartitionsOffsets :: MonadIO m => OffsetCommit -> KafkaConsumer -> [TopicPartition] -> m (Maybe KafkaError) Source #
Commit offsets for the given partitions.
storeOffsets :: MonadIO m => KafkaConsumer -> [TopicPartition] -> m (Maybe KafkaError) Source #
Stores offsets locally
storeOffsetMessage :: MonadIO m => KafkaConsumer -> ConsumerRecord k v -> m (Maybe KafkaError) Source #
Stores message's offset locally for the message's partition.
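A sketch of a store-then-commit flow: process a record, store its offset locally, then push offsets to the broker with a synchronous commit. Whether this pattern is appropriate depends on your auto-commit and offset-store configuration, which this sketch assumes you have set up accordingly:

```haskell
import Data.ByteString (ByteString)

import Kafka.Consumer

-- Process one record, store its offset locally, then commit synchronously.
processAndCommit :: KafkaConsumer
                 -> ConsumerRecord (Maybe ByteString) (Maybe ByteString)
                 -> IO (Maybe KafkaError)
processAndCommit kc record = do
  putStrLn ("Processing: " <> show record)
  _ <- storeOffsetMessage kc record
  commitAllOffsets OffsetCommit kc
```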
rewindConsumer :: MonadIO m => KafkaConsumer -> Timeout -> m (Maybe KafkaError) Source #
Rewind consumer's consume position to the last committed offsets for the current assignment. NOTE: follows https://github.com/edenhill/librdkafka/blob/master/examples/transactions.c#L166
closeConsumer :: MonadIO m => KafkaConsumer -> m (Maybe KafkaError) Source #
Closes the consumer. See newConsumer.
data RdKafkaRespErrT Source #
Constructors

See the synopsis above for the full list of RdKafkaRespErrT constructors (RdKafkaRespErrBegin through RdKafkaRespErrEndAll).
Instances

- Bounded RdKafkaRespErrT (defined in Kafka.Internal.RdKafka)
- Enum RdKafkaRespErrT (defined in Kafka.Internal.RdKafka)
- Show RdKafkaRespErrT (defined in Kafka.Internal.RdKafka)
- Eq RdKafkaRespErrT (defined in Kafka.Internal.RdKafka)