package kafka.server
import scala.collection._
import kafka.utils.Logging
import kafka.common._
import java.io._
/**
 * Persists a mapping of (topic, partition) -> offset to a file so it survives restarts.
 *
 * File format (version 0):
 *   line 1: version number
 *   line 2: number of entries
 *   then one "topic partition offset" line (whitespace-separated) per entry.
 *
 * Writes are crash-safe: the new contents are written to a side file
 * ("<file>.tmp"), fsynced, and then renamed over the target, so readers
 * always see either the old complete checkpoint or the new one.
 * All access is serialized on an internal lock, so instances are thread-safe.
 */
class OffsetCheckpoint(val file: File) extends Logging {
  private val lock = new Object()

  // Remove any temp file left behind by a crash mid-write. Use the same
  // absolute-path construction as write() so we delete the file write()
  // would actually produce (the old `file + ".tmp"` form used toString,
  // which may be a relative path resolving elsewhere if the CWD changed).
  new File(file.getAbsolutePath + ".tmp").delete()
  // Ensure the checkpoint file exists so read() doesn't fail on first use.
  // Returns false if it already exists, which is fine.
  file.createNewFile()

  /**
   * Atomically replace the checkpoint file with the given offsets.
   *
   * @param offsets the (topic, partition) -> offset entries to persist
   * @throws IOException if writing, syncing, or renaming the file fails
   */
  def write(offsets: Map[TopicAndPartition, Long]): Unit = {
    lock synchronized {
      val temp = new File(file.getAbsolutePath + ".tmp")
      val fileOutputStream = new FileOutputStream(temp)
      val writer = new BufferedWriter(new OutputStreamWriter(fileOutputStream))
      try {
        try {
          writer.write(0.toString)                 // version
          writer.newLine()
          writer.write(offsets.size.toString)      // entry count
          writer.newLine()
          offsets.foreach { case (topicPart, offset) =>
            writer.write("%s %d %d".format(topicPart.topic, topicPart.partition, offset))
            writer.newLine()
          }
          writer.flush()
          // Force the bytes to disk BEFORE the rename makes them visible;
          // otherwise a crash could expose a truncated checkpoint.
          fileOutputStream.getFD().sync()
        } finally {
          writer.close()
        }
      } catch {
        case e: IOException =>
          // Don't leave a half-written temp file behind on failure.
          temp.delete()
          throw e
      }
      // Swap the new checkpoint into place. On platforms where renaming over
      // an existing file fails (e.g. Windows), delete the target and retry.
      if (!temp.renameTo(file)) {
        file.delete()
        if (!temp.renameTo(file))
          throw new IOException("File rename from %s to %s failed.".format(temp.getAbsolutePath, file.getAbsolutePath))
      }
    }
  }

  /**
   * Read the checkpoint file back into a map.
   *
   * @return the persisted (topic, partition) -> offset entries; empty if the
   *         file is empty or truncated before the entry-count line
   * @throws IOException if the version is unrecognized, a line is malformed,
   *         or the entry count does not match the number of lines read
   */
  def read(): Map[TopicAndPartition, Long] = {
    lock synchronized {
      val reader = new BufferedReader(new FileReader(file))
      try {
        val versionLine = reader.readLine()
        if (versionLine == null)
          Map.empty
        else versionLine.toInt match {
          case 0 =>
            val sizeLine = reader.readLine()
            if (sizeLine == null)
              Map.empty
            else {
              val expectedSize = sizeLine.toInt
              // Parse "topic partition offset" lines until EOF.
              val offsets = Iterator.continually(reader.readLine())
                .takeWhile(_ != null)
                .map { line =>
                  val pieces = line.split("\\s+")
                  if (pieces.length != 3)
                    throw new IOException("Malformed line in offset checkpoint file: '%s'.".format(line))
                  TopicAndPartition(pieces(0), pieces(1).toInt) -> pieces(2).toLong
                }
                .toMap
              // A short file means the write was truncated — fail loudly
              // rather than silently returning partial offsets.
              if (offsets.size != expectedSize)
                throw new IOException("Expected %d entries but found only %d".format(expectedSize, offsets.size))
              offsets
            }
          case version =>
            throw new IOException("Unrecognized version of the highwatermark checkpoint file: " + version)
        }
      } finally {
        reader.close()
      }
    }
  }
}