package kafka.api
import java.nio.ByteBuffer
import kafka.api.ApiUtils._
import kafka.common.{TopicAndPartition, OffsetMetadataAndError}
import kafka.utils.Logging
object OffsetFetchResponse extends Logging {
  val CurrentVersion: Short = 0

  /**
   * Deserializes an OffsetFetchResponse from the given buffer.
   *
   * Wire layout (all reads occur in this exact order):
   *   correlationId (int32), topicCount (int32), then per topic:
   *   topic (short string), partitionCount (int32), then per partition:
   *   partitionId (int32), offset (int64), metadata (short string), errorCode (int16).
   */
  def readFrom(buffer: ByteBuffer): OffsetFetchResponse = {
    val correlationId = buffer.getInt
    val numTopics = buffer.getInt
    val entries = (0 until numTopics).flatMap { _ =>
      val topicName = readShortString(buffer)
      val numPartitions = buffer.getInt
      (0 until numPartitions).map { _ =>
        val partition = buffer.getInt
        val fetchedOffset = buffer.getLong
        val meta = readShortString(buffer)
        val errorCode = buffer.getShort
        TopicAndPartition(topicName, partition) -> OffsetMetadataAndError(fetchedOffset, meta, errorCode)
      }
    }
    OffsetFetchResponse(entries.toMap, correlationId)
  }
}
/**
 * Response to an offset fetch request: for each requested topic/partition,
 * carries the committed offset, its metadata string, and an error code.
 */
case class OffsetFetchResponse(requestInfo: Map[TopicAndPartition, OffsetMetadataAndError],
                               correlationId: Int = 0)
    extends RequestOrResponse() {

  // Serialization groups entries per topic, so the topic name is written once
  // per group; computed lazily since it is only needed for writeTo/sizeInBytes.
  lazy val requestInfoGroupedByTopic = requestInfo.groupBy(_._1.topic)

  /** Writes this response into the buffer using the same grouped-by-topic layout that readFrom expects. */
  def writeTo(buffer: ByteBuffer) {
    buffer.putInt(correlationId)
    buffer.putInt(requestInfoGroupedByTopic.size) // topic count
    requestInfoGroupedByTopic.foreach { case (topic, partitionEntries) =>
      writeShortString(buffer, topic)
      buffer.putInt(partitionEntries.size) // partition count for this topic
      partitionEntries.foreach { case (topicAndPartition, offsetInfo) =>
        buffer.putInt(topicAndPartition.partition)
        buffer.putLong(offsetInfo.offset)
        writeShortString(buffer, offsetInfo.metadata)
        buffer.putShort(offsetInfo.error)
      }
    }
  }

  /** Exact serialized size in bytes; mirrors the field layout of writeTo. */
  override def sizeInBytes = {
    val topicsSize = requestInfoGroupedByTopic.map { case (topic, partitionEntries) =>
      val partitionsSize = partitionEntries.map { case (_, offsetInfo) =>
        4 + /* partition id */
        8 + /* offset */
        shortStringLength(offsetInfo.metadata) +
        2 /* error code */
      }.sum
      shortStringLength(topic) +
      4 + /* partition count */
      partitionsSize
    }.sum
    4 + /* correlation id */
    4 + /* topic count */
    topicsSize
  }

  override def describe(details: Boolean): String = toString
}