@@ -213,16 +213,6 @@ class KafkaConsumer: public KafkaClient
213213 */
214214 std::vector<consumer::ConsumerRecord> poll (std::chrono::milliseconds timeout);
215215
216- /* *
217- * Fetch data for the topics or partitions specified using one of the subscribe/assign APIs.
218- * Returns the number of polled records (which have been saved into parameter `output`).
219- * Note: 1) The result could be fetched through ConsumerRecord (with member function `error`).
220- * 2) Make sure the `ConsumerRecord` be destructed before the `KafkaConsumer.close()`.
221- * Throws KafkaException with errors:
222- * - RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION: Unknow partition
223- */
224- std::size_t poll (std::chrono::milliseconds timeout, std::vector<consumer::ConsumerRecord>& output);
225-
226216 /* *
227217 * Suspend fetching from the requested partitions. Future calls to poll() will not return any records from these partitions until they have been resumed using resume().
228218 * Note: 1) After pausing, the application still need to call `poll()` at regular intervals.
@@ -320,8 +310,6 @@ class KafkaConsumer: public KafkaClient
320310 // Register Callbacks for rd_kafka_conf_t
321311 static void registerConfigCallbacks (rd_kafka_conf_t * conf);
322312
323- void pollMessages (int timeoutMs, std::vector<consumer::ConsumerRecord>& output);
324-
325313 enum class PauseOrResumeOperation { Pause, Resume };
326314 void pauseOrResumePartitions (const TopicPartitions& topicPartitions, PauseOrResumeOperation op);
327315
@@ -820,45 +808,28 @@ KafkaConsumer::storeOffsetsIfNecessary(const std::vector<consumer::ConsumerRecor
820808 }
821809}
822810
823- // Fetch messages (internally used)
824- inline void
825- KafkaConsumer::pollMessages ( int timeoutMs, std::vector<consumer::ConsumerRecord>& output )
811+ // Fetch messages
812+ inline std::vector<consumer::ConsumerRecord>
813+ KafkaConsumer::poll ( std::chrono::milliseconds timeout )
826814{
827815 // Commit the offsets for these messages which had been polled last time (for "enable.auto.commit=true" case)
828816 commitStoredOffsetsIfNecessary (CommitType::Async);
829817
830818 // Poll messages with librdkafka's API
831819 std::vector<rd_kafka_message_t *> msgPtrArray (_maxPollRecords);
832- auto msgReceived = rd_kafka_consume_batch_queue (_rk_queue.get (), timeoutMs , msgPtrArray.data (), _maxPollRecords);
820+ auto msgReceived = rd_kafka_consume_batch_queue (_rk_queue.get (), convertMsDurationToInt (timeout) , msgPtrArray.data (), _maxPollRecords);
833821 if (msgReceived < 0 )
834822 {
835823 KAFKA_THROW_ERROR (Error (rd_kafka_last_error ()));
836824 }
837825
838826 // Wrap messages with ConsumerRecord
839- output.clear ();
840- output.reserve (static_cast <std::size_t >(msgReceived));
841- std::for_each (msgPtrArray.begin (), msgPtrArray.begin () + msgReceived, [&output](rd_kafka_message_t * rkMsg) { output.emplace_back (rkMsg); });
827+ std::vector<consumer::ConsumerRecord> records (msgPtrArray.begin (), msgPtrArray.begin () + msgReceived);
842828
843829 // Store the offsets for all these polled messages (for "enable.auto.commit=true" case)
844- storeOffsetsIfNecessary (output);
845- }
846-
847- // Fetch messages (return via return value)
848- inline std::vector<consumer::ConsumerRecord>
849- KafkaConsumer::poll (std::chrono::milliseconds timeout)
850- {
851- std::vector<consumer::ConsumerRecord> result;
852- poll (timeout, result);
853- return result;
854- }
830+ storeOffsetsIfNecessary (records);
855831
856- // Fetch messages (return via input parameter)
857- inline std::size_t
858- KafkaConsumer::poll (std::chrono::milliseconds timeout, std::vector<consumer::ConsumerRecord>& output)
859- {
860- pollMessages (convertMsDurationToInt (timeout), output);
861- return output.size ();
832+ return records;
862833}
863834
864835inline void
0 commit comments