From 53bdaa5ddaabe0396c61dc1c08a63e8a970f9629 Mon Sep 17 00:00:00 2001 From: furkilic Date: Sat, 11 Jul 2020 08:43:46 +0200 Subject: [PATCH] Init Commit for Simple versioning --- src/main/scala/io/findify/s3mock/S3Mock.scala | 11 +- .../s3mock/error/NoSuchVersionException.scala | 13 ++ .../provider/AbstractInMemoryProvider.scala | 207 ++++++++++++++++++ .../s3mock/provider/FileProvider.scala | 4 +- .../s3mock/provider/InMemoryProvider.scala | 178 ++------------- .../provider/InMemoryVersionedProvider.scala | 63 ++++++ .../io/findify/s3mock/provider/Provider.scala | 2 +- .../InMemoryVersionedMetadataStore.scala | 35 +++ .../CompleteMultipartUploadResult.scala | 4 +- .../io/findify/s3mock/route/GetObject.scala | 3 +- .../findify/s3mock/route/MetadataUtil.scala | 1 + .../io/findify/s3mock/route/PutObject.scala | 8 +- .../route/PutObjectMultipartComplete.scala | 4 +- .../example/JavaBuilderVersionedExample.java | 49 +++++ .../io/findify/s3mock/GetPutObjectTest.scala | 4 +- .../scala/io/findify/s3mock/S3MockTest.scala | 20 +- .../findify/s3mock/VersionedObjectTest.scala | 62 ++++++ 17 files changed, 488 insertions(+), 180 deletions(-) create mode 100644 src/main/scala/io/findify/s3mock/error/NoSuchVersionException.scala create mode 100644 src/main/scala/io/findify/s3mock/provider/AbstractInMemoryProvider.scala create mode 100644 src/main/scala/io/findify/s3mock/provider/InMemoryVersionedProvider.scala create mode 100644 src/main/scala/io/findify/s3mock/provider/metadata/InMemoryVersionedMetadataStore.scala create mode 100644 src/test/java/io/findify/s3mock/example/JavaBuilderVersionedExample.java create mode 100644 src/test/scala/io/findify/s3mock/VersionedObjectTest.scala diff --git a/src/main/scala/io/findify/s3mock/S3Mock.scala b/src/main/scala/io/findify/s3mock/S3Mock.scala index 86fc07f..ba5c632 100644 --- a/src/main/scala/io/findify/s3mock/S3Mock.scala +++ b/src/main/scala/io/findify/s3mock/S3Mock.scala @@ -6,7 +6,7 @@ import 
akka.http.scaladsl.model.{HttpResponse, StatusCodes} import akka.http.scaladsl.server.Directives._ import akka.stream.ActorMaterializer import com.typesafe.scalalogging.LazyLogging -import io.findify.s3mock.provider.{FileProvider, InMemoryProvider, Provider} +import io.findify.s3mock.provider.{FileProvider, InMemoryProvider, InMemoryVersionedProvider, Provider} import io.findify.s3mock.route._ import scala.concurrent.{Await, Future} @@ -131,6 +131,15 @@ object S3Mock { this } + /** + * Use in-memory versioned backend. + * @return + */ + def withInMemoryVersionedBackend(): Builder = { + defaultProvider = new InMemoryVersionedProvider() + this + } + /** * Use file-based backend * @param path Directory to mount diff --git a/src/main/scala/io/findify/s3mock/error/NoSuchVersionException.scala b/src/main/scala/io/findify/s3mock/error/NoSuchVersionException.scala new file mode 100644 index 0000000..fc465b7 --- /dev/null +++ b/src/main/scala/io/findify/s3mock/error/NoSuchVersionException.scala @@ -0,0 +1,13 @@ +package io.findify.s3mock.error + +/** + * Created by furkilic on 7/11/20. 
+ */ +case class NoSuchVersionException(bucket:String, key:String, versionId:String) extends Exception(s"version does not exist: s3://$bucket/$key/$versionId") { + def toXML = + <Error> + <Code>NoSuchVersion</Code> + <Message>The resource you requested does not exist</Message> + <Resource>/{bucket}/{key}/{versionId}</Resource> + </Error> +} diff --git a/src/main/scala/io/findify/s3mock/provider/AbstractInMemoryProvider.scala b/src/main/scala/io/findify/s3mock/provider/AbstractInMemoryProvider.scala new file mode 100644 index 0000000..42ed332 --- /dev/null +++ b/src/main/scala/io/findify/s3mock/provider/AbstractInMemoryProvider.scala @@ -0,0 +1,207 @@ +package io.findify.s3mock.provider + +import java.util.UUID + +import akka.http.scaladsl.model.DateTime +import com.amazonaws.services.s3.model.ObjectMetadata +import com.typesafe.scalalogging.LazyLogging +import io.findify.s3mock.error.{NoSuchBucketException, NoSuchKeyException} +import io.findify.s3mock.provider.metadata.MetadataStore +import io.findify.s3mock.request.{CompleteMultipartUpload, CreateBucketConfiguration} +import io.findify.s3mock.response._ +import org.apache.commons.codec.digest.DigestUtils + +import scala.collection.concurrent.TrieMap +import scala.collection.mutable +import scala.util.Random + +abstract class AbstractInMemoryProvider extends Provider with LazyLogging { + protected val mdStore = newMetadataStore + protected val bucketDataStore = new TrieMap[String, BucketContents] + protected val multipartTempStore = new TrieMap[String, mutable.SortedSet[MultipartChunk]] + + protected trait BucketContents { + def getCreationTime: DateTime + + def getKeyInBucket(key: String): Option[KeyContents] + + def putContentsInKeyInBucket(key: String, data: Array[Byte], objectMetadata: ObjectMetadata, lastModificationTime: DateTime = DateTime.now): Unit + + def getKeysInBucket: mutable.Map[String, KeyContents] + + def removeKeyInBucket(key: String): Option[KeyContents] + } + + protected trait KeyContents { + def getLastModificationTime: DateTime + + def getData: Array[Byte] + }
+ + protected case class MultipartChunk(partNo: Int, data: Array[Byte]) extends Ordered[MultipartChunk] { + override def compare(that: MultipartChunk): Int = partNo compareTo that.partNo + } + + def newBucketContents(creationTime: DateTime): BucketContents + + def newMetadataStore: MetadataStore + + override def metadataStore: MetadataStore = mdStore + + override def listBuckets: ListAllMyBuckets = { + val buckets = bucketDataStore map { case (name, data: BucketContents) => Bucket(name, data.getCreationTime) } + logger.debug(s"listing buckets: ${buckets.map(_.name)}") + ListAllMyBuckets("root", UUID.randomUUID().toString, buckets.toList) + } + + override def listBucket(bucket: String, prefix: Option[String], delimiter: Option[String], maxkeys: Option[Int]): ListBucket = { + def commonPrefix(dir: String, p: String, d: String): Option[String] = { + dir.indexOf(d, p.length) match { + case -1 => None + case pos => Some(p + dir.substring(p.length, pos) + d) + } + } + + val prefix2 = prefix.getOrElse("") + bucketDataStore.get(bucket) match { + case Some(bucketContent) => + val matchingKeys = bucketContent.getKeysInBucket.filterKeys(_.startsWith(prefix2)) + val matchResults = matchingKeys map { case (name, contentVersions: KeyContents) => + Content(name, contentVersions.getLastModificationTime, DigestUtils.md5Hex(contentVersions.getData), contentVersions.getData.length, "STANDARD") + } + logger.debug(s"listing bucket contents: ${matchResults.map(_.key)}") + val commonPrefixes = normalizeDelimiter(delimiter) match { + case Some(del) => matchResults.flatMap(f => commonPrefix(f.key, prefix2, del)).toList.sorted.distinct + case None => Nil + } + val filteredFiles: List[Content] = matchResults.filterNot(f => commonPrefixes.exists(p => f.key.startsWith(p))).toList + val count = maxkeys.getOrElse(Int.MaxValue) + val result = filteredFiles.sortBy(_.key) + ListBucket(bucket, prefix, delimiter, commonPrefixes, result.take(count).take(count), isTruncated = result.size > count) + 
case None => throw NoSuchBucketException(bucket) + } + } + + override def createBucket(name: String, bucketConfig: CreateBucketConfiguration): CreateBucket = { + bucketDataStore.putIfAbsent(name, newBucketContents(DateTime.now)) + logger.debug(s"creating bucket $name") + CreateBucket(name) + } + + override def putObject(bucket: String, key: String, data: Array[Byte], objectMetadata: ObjectMetadata): Unit = { + bucketDataStore.get(bucket) match { + case Some(bucketContent) => + logger.debug(s"putting object for s3://$bucket/$key, bytes = ${data.length}") + bucketContent.putContentsInKeyInBucket(key, data, objectMetadata) + objectMetadata.setLastModified(org.joda.time.DateTime.now().toDate) + metadataStore.put(bucket, key, objectMetadata) + case None => throw NoSuchBucketException(bucket) + } + } + + override def copyObjectMultipart(sourceBucket: String, sourceKey: String, destBucket: String, destKey: String, part: Int, uploadId: String, fromByte: Int, toByte: Int, newMeta: Option[ObjectMetadata] = None): CopyObjectResult = { + val data = getObject(sourceBucket, sourceKey).bytes.slice(fromByte, toByte + 1) + putObjectMultipartPart(destBucket, destKey, part, uploadId, data) + new CopyObjectResult(DateTime.now, DigestUtils.md5Hex(data)) + } + + override def getObject(bucket: String, key: String, params: Map[String, String] = Map.empty): GetObjectData = { + bucketDataStore.get(bucket) match { + case Some(bucketContent) => bucketContent.getKeyInBucket(key) match { + case Some(keyContent) => + logger.debug(s"reading object for s://$bucket/$key") + val meta = metadataStore.get(bucket, key) + GetObjectData(keyContent.getData, meta) + case None => throw NoSuchKeyException(bucket, key) + } + case None => throw NoSuchBucketException(bucket) + } + } + + override def putObjectMultipartStart(bucket: String, key: String, metadata: ObjectMetadata): InitiateMultipartUploadResult = { + bucketDataStore.get(bucket) match { + case Some(_) => + val id = 
Math.abs(Random.nextLong()).toString + multipartTempStore.putIfAbsent(id, new mutable.TreeSet) + metadataStore.put(bucket, key, metadata) + logger.debug(s"starting multipart upload for s3://$bucket/$key") + InitiateMultipartUploadResult(bucket, key, id) + case None => throw NoSuchBucketException(bucket) + } + } + + override def putObjectMultipartPart(bucket: String, key: String, partNumber: Int, uploadId: String, data: Array[Byte]): Unit = { + bucketDataStore.get(bucket) match { + case Some(_) => + logger.debug(s"uploading multipart chunk $partNumber for s3://$bucket/$key") + multipartTempStore.getOrElseUpdate(uploadId, new mutable.TreeSet).add(MultipartChunk(partNumber, data)) + case None => throw NoSuchBucketException(bucket) + } + } + + override def putObjectMultipartComplete(bucket: String, key: String, uploadId: String, request: CompleteMultipartUpload): CompleteMultipartUploadResult = { + bucketDataStore.get(bucket) match { + case Some(bucketContent) => + val completeBytes = multipartTempStore(uploadId).toSeq.map(_.data).fold(Array[Byte]())(_ ++ _) + val objectMetadata: ObjectMetadata = metadataStore.get(bucket, key).get + bucketContent.putContentsInKeyInBucket(key, completeBytes, objectMetadata) + multipartTempStore.remove(uploadId) + logger.debug(s"completed multipart upload for s3://$bucket/$key") + val hash = DigestUtils.md5Hex(completeBytes) + metadataStore.get(bucket, key).foreach { m => + m.setContentMD5(hash) + m.setLastModified(org.joda.time.DateTime.now().toDate) + } + CompleteMultipartUploadResult(bucket, key, hash, objectMetadata) + case None => throw NoSuchBucketException(bucket) + } + } + + override def copyObject(sourceBucket: String, sourceKey: String, destBucket: String, destKey: String, newMeta: Option[ObjectMetadata] = None): CopyObjectResult = { + (bucketDataStore.get(sourceBucket), bucketDataStore.get(destBucket)) match { + case (Some(srcBucketContent), Some(dstBucketContent)) => + srcBucketContent.getKeyInBucket(sourceKey) match { + case 
Some(srcKeyContent) => + val destFileModTime = DateTime.now + val sourceMeta = newMeta.orElse(metadataStore.get(sourceBucket, sourceKey)) + dstBucketContent.putContentsInKeyInBucket(destKey, srcKeyContent.getData.clone, sourceMeta.get, destFileModTime) + logger.debug(s"Copied s3://$sourceBucket/$sourceKey to s3://$destBucket/$destKey") + sourceMeta.foreach(meta => metadataStore.put(destBucket, destKey, meta)) + CopyObjectResult(destFileModTime, DigestUtils.md5Hex(srcKeyContent.getData)) + case None => throw NoSuchKeyException(sourceBucket, sourceKey) + } + case (None, _) => throw NoSuchBucketException(sourceBucket) + case _ => throw NoSuchBucketException(destBucket) + } + } + + override def deleteObject(bucket: String, key: String): Unit = { + bucketDataStore.get(bucket) match { + case Some(bucketContent) => bucketContent.getKeyInBucket(key) match { + case Some(_) => + logger.debug(s"deleting object s://$bucket/$key") + bucketContent.removeKeyInBucket(key) + metadataStore.delete(bucket, key) + case None => bucketContent.getKeysInBucket.keys.find(_.startsWith(key)) match { + case Some(_) => + logger.debug(s"recursive delete by prefix is not supported by S3") + () + case None => + logger.warn(s"key does not exist") + throw NoSuchKeyException(bucket, key) + } + } + case None => throw NoSuchBucketException(bucket) + } + } + + override def deleteBucket(bucket: String): Unit = { + bucketDataStore.get(bucket) match { + case Some(_) => + logger.debug(s"deleting bucket s://$bucket") + bucketDataStore.remove(bucket) + metadataStore.remove(bucket) + case None => throw NoSuchBucketException(bucket) + } + } +} diff --git a/src/main/scala/io/findify/s3mock/provider/FileProvider.scala b/src/main/scala/io/findify/s3mock/provider/FileProvider.scala index f9a3341..45940be 100644 --- a/src/main/scala/io/findify/s3mock/provider/FileProvider.scala +++ b/src/main/scala/io/findify/s3mock/provider/FileProvider.scala @@ -83,7 +83,7 @@ class FileProvider(dir:String) extends Provider with 
LazyLogging { objectMetadata.setLastModified(org.joda.time.DateTime.now().toDate) metadataStore.put(bucket, key, objectMetadata) } - override def getObject(bucket:String, key:String): GetObjectData = { + override def getObject(bucket: String, key: String, params: Map[String, String] = Map.empty): GetObjectData = { val bucketFile = File(s"$dir/$bucket") val file = File(s"$dir/$bucket/$key") logger.debug(s"reading object for s3://$bucket/$key") @@ -127,7 +127,7 @@ class FileProvider(dir:String) extends Provider with LazyLogging { m.setLastModified(org.joda.time.DateTime.now().toDate) } logger.debug(s"completed multipart upload for s3://$bucket/$key") - CompleteMultipartUploadResult(bucket, key, hash) + CompleteMultipartUploadResult(bucket, key, hash, metadataStore.get(bucket, key).get) } override def copyObject(sourceBucket: String, sourceKey: String, destBucket: String, destKey: String, newMeta: Option[ObjectMetadata] = None): CopyObjectResult = { diff --git a/src/main/scala/io/findify/s3mock/provider/InMemoryProvider.scala b/src/main/scala/io/findify/s3mock/provider/InMemoryProvider.scala index a9f4ecd..c7d6c2f 100644 --- a/src/main/scala/io/findify/s3mock/provider/InMemoryProvider.scala +++ b/src/main/scala/io/findify/s3mock/provider/InMemoryProvider.scala @@ -1,189 +1,37 @@ package io.findify.s3mock.provider -import java.time.Instant -import java.util.{Date, UUID} - import akka.http.scaladsl.model.DateTime import com.amazonaws.services.s3.model.ObjectMetadata import com.typesafe.scalalogging.LazyLogging -import io.findify.s3mock.error.{NoSuchBucketException, NoSuchKeyException} import io.findify.s3mock.provider.metadata.{InMemoryMetadataStore, MetadataStore} -import io.findify.s3mock.request.{CompleteMultipartUpload, CreateBucketConfiguration} -import io.findify.s3mock.response._ -import org.apache.commons.codec.digest.DigestUtils import scala.collection.concurrent.TrieMap import scala.collection.mutable -import scala.util.Random - -class InMemoryProvider extends 
Provider with LazyLogging { - private val mdStore = new InMemoryMetadataStore - private val bucketDataStore = new TrieMap[String, BucketContents] - private val multipartTempStore = new TrieMap[String, mutable.SortedSet[MultipartChunk]] - - private case class BucketContents(creationTime: DateTime, keysInBucket: mutable.Map[String, KeyContents]) - - private case class KeyContents(lastModificationTime: DateTime, data: Array[Byte]) - - private case class MultipartChunk(partNo: Int, data: Array[Byte]) extends Ordered[MultipartChunk] { - override def compare(that: MultipartChunk): Int = partNo compareTo that.partNo - } - - override def metadataStore: MetadataStore = mdStore - override def listBuckets: ListAllMyBuckets = { - val buckets = bucketDataStore map { case (name, data) => Bucket(name, data.creationTime) } - logger.debug(s"listing buckets: ${buckets.map(_.name)}") - ListAllMyBuckets("root", UUID.randomUUID().toString, buckets.toList) - } +class InMemoryProvider extends AbstractInMemoryProvider with LazyLogging { - override def listBucket(bucket: String, prefix: Option[String], delimiter: Option[String], maxkeys: Option[Int]): ListBucket = { - def commonPrefix(dir: String, p: String, d: String): Option[String] = { - dir.indexOf(d, p.length) match { - case -1 => None - case pos => Some(p + dir.substring(p.length, pos) + d) - } - } + protected case class SimpleBucketContents(creationTime: DateTime, keysInBucket: mutable.Map[String, KeyContents]) extends BucketContents { + override def getCreationTime: DateTime = creationTime - val prefix2 = prefix.getOrElse("") - bucketDataStore.get(bucket) match { - case Some(bucketContent) => - val matchingKeys = bucketContent.keysInBucket.filterKeys(_.startsWith(prefix2)) - val matchResults = matchingKeys map { case (name, content) => - Content(name, content.lastModificationTime, DigestUtils.md5Hex(content.data), content.data.length, "STANDARD") - } - logger.debug(s"listing bucket contents: ${matchResults.map(_.key)}") - val 
commonPrefixes = normalizeDelimiter(delimiter) match { - case Some(del) => matchResults.flatMap(f => commonPrefix(f.key, prefix2, del)).toList.sorted.distinct - case None => Nil - } - val filteredFiles: List[Content] = matchResults.filterNot(f => commonPrefixes.exists(p => f.key.startsWith(p))).toList - val count = maxkeys.getOrElse(Int.MaxValue) - val result = filteredFiles.sortBy(_.key) - ListBucket(bucket, prefix, delimiter, commonPrefixes, result.take(count).take(count), isTruncated = result.size>count) - case None => throw NoSuchBucketException(bucket) - } - } + override def getKeyInBucket(key: String): Option[KeyContents] = keysInBucket.get(key) - override def createBucket(name: String, bucketConfig: CreateBucketConfiguration): CreateBucket = { - bucketDataStore.putIfAbsent(name, BucketContents(DateTime.now, new TrieMap)) - logger.debug(s"creating bucket $name") - CreateBucket(name) - } - - override def putObject(bucket: String, key: String, data: Array[Byte], objectMetadata: ObjectMetadata): Unit = { - bucketDataStore.get(bucket) match { - case Some(bucketContent) => - logger.debug(s"putting object for s3://$bucket/$key, bytes = ${data.length}") - bucketContent.keysInBucket.put(key, KeyContents(DateTime.now, data)) - objectMetadata.setLastModified(org.joda.time.DateTime.now().toDate) - metadataStore.put(bucket, key, objectMetadata) - case None => throw NoSuchBucketException(bucket) + override def putContentsInKeyInBucket(key: String, data: Array[Byte], objectMetadata: ObjectMetadata, lastModificationTime: DateTime = DateTime.now): Unit = { + keysInBucket.put(key, SimpleKeyContents(lastModificationTime, data)) } - } - override def copyObjectMultipart(sourceBucket: String, sourceKey: String, destBucket: String, destKey: String, part: Int, uploadId:String, fromByte: Int, toByte: Int, newMeta: Option[ObjectMetadata] = None): CopyObjectResult = { - val data = getObject(sourceBucket, sourceKey).bytes.slice(fromByte, toByte + 1) - putObjectMultipartPart(destBucket, 
destKey, part, uploadId, data) - new CopyObjectResult(DateTime.now, DigestUtils.md5Hex(data)) - } + override def getKeysInBucket: mutable.Map[String, KeyContents] = keysInBucket - override def getObject(bucket: String, key: String): GetObjectData = { - bucketDataStore.get(bucket) match { - case Some(bucketContent) => bucketContent.keysInBucket.get(key) match { - case Some(keyContent) => - logger.debug(s"reading object for s://$bucket/$key") - val meta = metadataStore.get(bucket, key) - GetObjectData(keyContent.data, meta) - case None => throw NoSuchKeyException(bucket, key) - } - case None => throw NoSuchBucketException(bucket) - } + override def removeKeyInBucket(key: String): Option[KeyContents] = keysInBucket.remove(key) } - override def putObjectMultipartStart(bucket: String, key: String, metadata: ObjectMetadata): InitiateMultipartUploadResult = { - bucketDataStore.get(bucket) match { - case Some(_) => - val id = Math.abs(Random.nextLong()).toString - multipartTempStore.putIfAbsent(id, new mutable.TreeSet) - metadataStore.put(bucket, key, metadata) - logger.debug(s"starting multipart upload for s3://$bucket/$key") - InitiateMultipartUploadResult(bucket, key, id) - case None => throw NoSuchBucketException(bucket) - } - } - - override def putObjectMultipartPart(bucket: String, key: String, partNumber: Int, uploadId: String, data: Array[Byte]): Unit = { - bucketDataStore.get(bucket) match { - case Some(_) => - logger.debug(s"uploading multipart chunk $partNumber for s3://$bucket/$key") - multipartTempStore.getOrElseUpdate(uploadId, new mutable.TreeSet).add(MultipartChunk(partNumber, data)) - case None => throw NoSuchBucketException(bucket) - } - } + protected case class SimpleKeyContents(lastModificationTime: DateTime, data: Array[Byte]) extends KeyContents { + override def getLastModificationTime: DateTime = lastModificationTime - override def putObjectMultipartComplete(bucket: String, key: String, uploadId: String, request: CompleteMultipartUpload): 
CompleteMultipartUploadResult = { - bucketDataStore.get(bucket) match { - case Some(bucketContent) => - val completeBytes = multipartTempStore(uploadId).toSeq.map(_.data).fold(Array[Byte]())(_ ++ _) - bucketContent.keysInBucket.put(key, KeyContents(DateTime.now, completeBytes)) - multipartTempStore.remove(uploadId) - logger.debug(s"completed multipart upload for s3://$bucket/$key") - val hash = DigestUtils.md5Hex(completeBytes) - metadataStore.get(bucket, key).foreach {m => - m.setContentMD5(hash) - m.setLastModified(org.joda.time.DateTime.now().toDate) - } - CompleteMultipartUploadResult(bucket, key, hash) - case None => throw NoSuchBucketException(bucket) - } + override def getData: Array[Byte] = data } - override def copyObject(sourceBucket: String, sourceKey: String, destBucket: String, destKey: String, newMeta: Option[ObjectMetadata] = None): CopyObjectResult = { - (bucketDataStore.get(sourceBucket), bucketDataStore.get(destBucket)) match { - case (Some(srcBucketContent), Some(dstBucketContent)) => - srcBucketContent.keysInBucket.get(sourceKey) match { - case Some(srcKeyContent) => - val destFileModTime = DateTime.now - dstBucketContent.keysInBucket.put(destKey, KeyContents(destFileModTime, srcKeyContent.data.clone)) - logger.debug(s"Copied s3://$sourceBucket/$sourceKey to s3://$destBucket/$destKey") - val sourceMeta = newMeta.orElse(metadataStore.get(sourceBucket, sourceKey)) - sourceMeta.foreach(meta => metadataStore.put(destBucket, destKey, meta)) - CopyObjectResult(destFileModTime, DigestUtils.md5Hex(srcKeyContent.data)) - case None => throw NoSuchKeyException(sourceBucket, sourceKey) - } - case (None, _) => throw NoSuchBucketException(sourceBucket) - case _ => throw NoSuchBucketException(destBucket) - } - } - override def deleteObject(bucket: String, key: String): Unit = { - bucketDataStore.get(bucket) match { - case Some(bucketContent) => bucketContent.keysInBucket.get(key) match { - case Some(_) => - logger.debug(s"deleting object s://$bucket/$key") - 
bucketContent.keysInBucket.remove(key) - metadataStore.delete(bucket, key) - case None => bucketContent.keysInBucket.keys.find(_.startsWith(key)) match { - case Some(_) => - logger.debug(s"recursive delete by prefix is not supported by S3") - () - case None => - logger.warn(s"key does not exist") - throw NoSuchKeyException(bucket, key) - } - } - case None => throw NoSuchBucketException(bucket) - } - } + override def newBucketContents(creationTime: DateTime): BucketContents = SimpleBucketContents(creationTime, new TrieMap) - override def deleteBucket(bucket: String): Unit = { - bucketDataStore.get(bucket) match { - case Some(_) => - logger.debug(s"deleting bucket s://$bucket") - bucketDataStore.remove(bucket) - metadataStore.remove(bucket) - case None => throw NoSuchBucketException(bucket) - } - } + override def newMetadataStore: MetadataStore = new InMemoryMetadataStore } diff --git a/src/main/scala/io/findify/s3mock/provider/InMemoryVersionedProvider.scala b/src/main/scala/io/findify/s3mock/provider/InMemoryVersionedProvider.scala new file mode 100644 index 0000000..62ed4ac --- /dev/null +++ b/src/main/scala/io/findify/s3mock/provider/InMemoryVersionedProvider.scala @@ -0,0 +1,63 @@ +package io.findify.s3mock.provider + +import akka.http.scaladsl.model.DateTime +import com.amazonaws.services.s3.model.ObjectMetadata +import com.typesafe.scalalogging.LazyLogging +import io.findify.s3mock.error.{NoSuchBucketException, NoSuchKeyException, NoSuchVersionException} +import io.findify.s3mock.provider.metadata.{InMemoryVersionedMetadataStore, MetadataStore} + +import scala.collection.concurrent.TrieMap +import scala.collection.mutable + +/** + * Created by furkilic on 7/11/20. 
+ */ + +class InMemoryVersionedProvider extends AbstractInMemoryProvider with LazyLogging { + + protected case class VersionedBucketContents(creationTime: DateTime, keysInBucket: mutable.Map[String, List[VersionedKeyContents]]) extends BucketContents { + override def getCreationTime: DateTime = creationTime + + override def getKeyInBucket(key: String): Option[KeyContents] = keysInBucket.get(key).map(_.last) + + override def putContentsInKeyInBucket(key: String, data: Array[Byte], objectMetadata: ObjectMetadata, lastModificationTime: DateTime = DateTime.now): Unit = { + keysInBucket.put(key, keysInBucket.getOrElse(key, List()) :+ VersionedKeyContents(DateTime.now, data, objectMetadata.getVersionId)) + } + + override def getKeysInBucket: mutable.Map[String, KeyContents] = keysInBucket map { case (name: String, list: List[VersionedKeyContents]) => (name, list.last) } + + override def removeKeyInBucket(key: String): Option[KeyContents] = keysInBucket.remove(key).map(_.last) + } + + protected case class VersionedKeyContents(lastModificationTime: DateTime, data: Array[Byte], versionId: String) extends KeyContents { + override def getLastModificationTime: DateTime = lastModificationTime + + override def getData: Array[Byte] = data + } + + + override def newBucketContents(creationTime: DateTime): BucketContents = VersionedBucketContents(creationTime, new TrieMap) + + override def newMetadataStore: MetadataStore = new InMemoryVersionedMetadataStore + + override def getObject(bucket: String, key: String, params: Map[String, String] = Map.empty): GetObjectData = { + params.get("versionId") match { + case Some(versionId) => bucketDataStore.get(bucket) match { + case Some(bucketContent: VersionedBucketContents) => bucketContent.keysInBucket.get(key) match { + case Some(contentVersions) => contentVersions.find(_.versionId == versionId) match { + case Some(keyContent) => + logger.debug(s"reading object for s://$bucket/$key/$versionId") + var meta = 
metadataStore.asInstanceOf[InMemoryVersionedMetadataStore].get(bucket, key, versionId) + GetObjectData(keyContent.data, meta) + case None => throw NoSuchVersionException(bucket, key, versionId) + } + case None => throw NoSuchKeyException(bucket, key) + } + case None => throw NoSuchBucketException(bucket) + } + case None => super.getObject(bucket, key) + } + + } + +} diff --git a/src/main/scala/io/findify/s3mock/provider/Provider.scala b/src/main/scala/io/findify/s3mock/provider/Provider.scala index cef2e68..aeb78d3 100644 --- a/src/main/scala/io/findify/s3mock/provider/Provider.scala +++ b/src/main/scala/io/findify/s3mock/provider/Provider.scala @@ -17,7 +17,7 @@ trait Provider { def listBucket(bucket:String, prefix:Option[String], delimiter: Option[String], maxkeys: Option[Int]):ListBucket def createBucket(name:String, bucketConfig:CreateBucketConfiguration):CreateBucket def putObject(bucket:String, key:String, data:Array[Byte], metadata: ObjectMetadata):Unit - def getObject(bucket:String, key:String): GetObjectData + def getObject(bucket:String, key:String, params: Map[String, String] = Map.empty): GetObjectData def putObjectMultipartStart(bucket:String, key:String, metadata: ObjectMetadata):InitiateMultipartUploadResult def putObjectMultipartPart(bucket:String, key:String, partNumber:Int, uploadId:String, data:Array[Byte]):Unit def putObjectMultipartComplete(bucket:String, key:String, uploadId:String, request:CompleteMultipartUpload):CompleteMultipartUploadResult diff --git a/src/main/scala/io/findify/s3mock/provider/metadata/InMemoryVersionedMetadataStore.scala b/src/main/scala/io/findify/s3mock/provider/metadata/InMemoryVersionedMetadataStore.scala new file mode 100644 index 0000000..788f9e6 --- /dev/null +++ b/src/main/scala/io/findify/s3mock/provider/metadata/InMemoryVersionedMetadataStore.scala @@ -0,0 +1,35 @@ +package io.findify.s3mock.provider.metadata + +import com.amazonaws.services.s3.model.ObjectMetadata + +import scala.collection.concurrent.TrieMap 
+import scala.collection.mutable + +/** + * Created by furkilic on 7/11/20. + */ + +class InMemoryVersionedMetadataStore extends MetadataStore { + + private val bucketMetadata = new TrieMap[String, mutable.Map[String, List[ObjectMetadata]]] + + override def put(bucket: String, key: String, meta: ObjectMetadata): Unit = { + val currentBucketMetadata = bucketMetadata.getOrElseUpdate(bucket, new TrieMap[String, List[ObjectMetadata]]()) + currentBucketMetadata.put(key, currentBucketMetadata.getOrElseUpdate(key, List()) :+ meta) + } + + override def get(bucket: String, key: String): Option[ObjectMetadata] = { + bucketMetadata.get(bucket).flatMap(_.get(key)).map(_.last) + } + + override def delete(bucket: String, key: String): Unit = { + val currentBucketMetadata = bucketMetadata.get(bucket) + currentBucketMetadata.flatMap(_.remove(key)) + } + + override def remove(bucket: String): Unit = bucketMetadata.remove(bucket) + + def get(bucket: String, key: String, versionId: String): Option[ObjectMetadata] = { + bucketMetadata.get(bucket).flatMap(_.get(key)).flatMap(_.find(_.getVersionId == versionId)) + } +} diff --git a/src/main/scala/io/findify/s3mock/response/CompleteMultipartUploadResult.scala b/src/main/scala/io/findify/s3mock/response/CompleteMultipartUploadResult.scala index 511b107..9554202 100644 --- a/src/main/scala/io/findify/s3mock/response/CompleteMultipartUploadResult.scala +++ b/src/main/scala/io/findify/s3mock/response/CompleteMultipartUploadResult.scala @@ -2,10 +2,12 @@ package io.findify.s3mock.response import java.net.URLDecoder +import com.amazonaws.services.s3.model.ObjectMetadata + /** * Created by shutty on 8/10/16. 
*/ -case class CompleteMultipartUploadResult(bucket:String, key:String, etag:String) { +case class CompleteMultipartUploadResult(bucket:String, key:String, etag:String, objectMetadata: ObjectMetadata) { def toXML = http://s3.amazonaws.com/{bucket}/{key} diff --git a/src/main/scala/io/findify/s3mock/route/GetObject.scala b/src/main/scala/io/findify/s3mock/route/GetObject.scala index f9de6cb..8434ef6 100644 --- a/src/main/scala/io/findify/s3mock/route/GetObject.scala +++ b/src/main/scala/io/findify/s3mock/route/GetObject.scala @@ -28,8 +28,7 @@ case class GetObject()(implicit provider: Provider) extends LazyLogging { respondWithDefaultHeader(`Last-Modified`(DateTime(1970, 1, 1))) { complete { logger.debug(s"get object: bucket=$bucket, path=$path") - - Try(provider.getObject(bucket, path)) match { + Try(provider.getObject(bucket, path, params)) match { case Success(GetObjectData(data, metaOption)) => metaOption match { case Some(meta) => diff --git a/src/main/scala/io/findify/s3mock/route/MetadataUtil.scala b/src/main/scala/io/findify/s3mock/route/MetadataUtil.scala index 3c71a9e..afebeab 100644 --- a/src/main/scala/io/findify/s3mock/route/MetadataUtil.scala +++ b/src/main/scala/io/findify/s3mock/route/MetadataUtil.scala @@ -70,6 +70,7 @@ object MetadataUtil extends LazyLogging { if(metadata.getContentType == null){ metadata.setContentType(request.entity.getContentType.toString) } + metadata.setHeader(Headers.S3_VERSION_ID, System.currentTimeMillis().toString) metadata } } diff --git a/src/main/scala/io/findify/s3mock/route/PutObject.scala b/src/main/scala/io/findify/s3mock/route/PutObject.scala index a4c1664..08df854 100644 --- a/src/main/scala/io/findify/s3mock/route/PutObject.scala +++ b/src/main/scala/io/findify/s3mock/route/PutObject.scala @@ -1,10 +1,12 @@ package io.findify.s3mock.route +import akka.http.scaladsl.model.headers.RawHeader import akka.http.scaladsl.model.{HttpRequest, HttpResponse, StatusCodes} import akka.http.scaladsl.server.Directives._ import 
akka.stream.Materializer import akka.stream.scaladsl.Sink import akka.util.ByteString +import com.amazonaws.services.s3.Headers import com.amazonaws.services.s3.model.ObjectMetadata import com.typesafe.scalalogging.LazyLogging import io.findify.s3mock.S3ChunkedProtocolStage @@ -41,7 +43,8 @@ case class PutObject()(implicit provider:Provider, mat:Materializer) extends Laz val bytes = data.toArray val metadata = populateObjectMetadata(request, bytes) Try(provider.putObject(bucket, path, bytes, metadata)) match { - case Success(()) => HttpResponse(StatusCodes.OK) + case Success(()) => + HttpResponse(StatusCodes.OK).withHeaders(RawHeader(Headers.S3_VERSION_ID, metadata.getVersionId)) case Failure(e: NoSuchBucketException) => HttpResponse( StatusCodes.NotFound, @@ -68,7 +71,8 @@ case class PutObject()(implicit provider:Provider, mat:Materializer) extends Laz val bytes = data.toArray val metadata = populateObjectMetadata(request, bytes) Try(provider.putObject(bucket, path, bytes, metadata)) match { - case Success(()) => HttpResponse(StatusCodes.OK) + case Success(()) => + HttpResponse(StatusCodes.OK).withHeaders(RawHeader(Headers.S3_VERSION_ID, metadata.getVersionId)) case Failure(e: NoSuchBucketException) => HttpResponse( StatusCodes.NotFound, diff --git a/src/main/scala/io/findify/s3mock/route/PutObjectMultipartComplete.scala b/src/main/scala/io/findify/s3mock/route/PutObjectMultipartComplete.scala index f2655c7..9f34a1a 100644 --- a/src/main/scala/io/findify/s3mock/route/PutObjectMultipartComplete.scala +++ b/src/main/scala/io/findify/s3mock/route/PutObjectMultipartComplete.scala @@ -1,7 +1,9 @@ package io.findify.s3mock.route import akka.http.scaladsl.model._ +import akka.http.scaladsl.model.headers.RawHeader import akka.http.scaladsl.server.Directives._ +import com.amazonaws.services.s3.Headers import com.typesafe.scalalogging.LazyLogging import io.findify.s3mock.error.{InternalErrorException, NoSuchBucketException} import io.findify.s3mock.provider.Provider @@ 
-27,7 +29,7 @@ case class PutObjectMultipartComplete()(implicit provider:Provider) extends Lazy ContentType(MediaTypes.`application/xml`, HttpCharsets.`UTF-8`), response.toXML.toString() ) - ) + ).withHeaders(RawHeader(Headers.S3_VERSION_ID, response.objectMetadata.getVersionId)) case Failure(e: NoSuchBucketException) => HttpResponse( StatusCodes.NotFound, diff --git a/src/test/java/io/findify/s3mock/example/JavaBuilderVersionedExample.java b/src/test/java/io/findify/s3mock/example/JavaBuilderVersionedExample.java new file mode 100644 index 0000000..154a982 --- /dev/null +++ b/src/test/java/io/findify/s3mock/example/JavaBuilderVersionedExample.java @@ -0,0 +1,49 @@ +package io.findify.s3mock.example; + +import com.amazonaws.auth.AWSStaticCredentialsProvider; +import com.amazonaws.auth.AnonymousAWSCredentials; +import com.amazonaws.client.builder.AwsClientBuilder; +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.AmazonS3ClientBuilder; +import com.amazonaws.services.s3.model.DeleteObjectsRequest; +import com.amazonaws.services.s3.model.GetObjectRequest; +import com.amazonaws.services.s3.model.PutObjectResult; +import com.amazonaws.services.s3.model.S3Object; +import com.amazonaws.util.IOUtils; +import io.findify.s3mock.S3Mock; +import org.apache.http.util.Asserts; + +import java.io.IOException; + +/** + * Created by furkilic on 7/11/20. 
+ */ +public class JavaBuilderVersionedExample { + public static void main(String[] args) throws IOException { + S3Mock api = new S3Mock.Builder().withPort(8002).withInMemoryVersionedBackend().build(); + api.start(); + AmazonS3 client = AmazonS3ClientBuilder + .standard() + .withPathStyleAccessEnabled(true) + .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration("http://localhost:8002", "us-east-1")) + .withCredentials(new AWSStaticCredentialsProvider(new AnonymousAWSCredentials())) + .build(); + + String firstContent = "firstContent"; + String secondContent = "secondContent"; + + client.createBucket("testbucket"); + PutObjectResult firstPut = client.putObject("testbucket", "filename", firstContent); + client.putObject("testbucket", "filename", secondContent); + + Asserts.check(getContent(client.getObject(new GetObjectRequest("testbucket", "filename", firstPut.getVersionId()))).equals(firstContent), "With Version"); + Asserts.check(getContent(client.getObject("testbucket", "filename")).equals(secondContent), "Without Version"); + + client.deleteObjects(new DeleteObjectsRequest("testbucket").withKeys("filename")); + + } + + private static String getContent(S3Object s3Object) throws IOException { + return IOUtils.toString(s3Object.getObjectContent().getDelegateStream()); + } +} diff --git a/src/test/scala/io/findify/s3mock/GetPutObjectTest.scala b/src/test/scala/io/findify/s3mock/GetPutObjectTest.scala index 4fd034b..3dec107 100644 --- a/src/test/scala/io/findify/s3mock/GetPutObjectTest.scala +++ b/src/test/scala/io/findify/s3mock/GetPutObjectTest.scala @@ -111,10 +111,10 @@ class GetPutObjectTest extends S3MockTest { it should "work with = in path" in { s3.createBucket("urlencoded") - s3.listBuckets().exists(_.getName == "urlencoded") shouldBe true + s3.listBuckets().asScala.exists(_.getName == "urlencoded") shouldBe true s3.putObject("urlencoded", "path/with=123/foo", "bar=") s3.putObject("urlencoded", "path/withoutequals/foo", "bar") - val result = 
s3.listObjects("urlencoded").getObjectSummaries.toList.map(_.getKey) + val result = s3.listObjects("urlencoded").getObjectSummaries.asScala.toList.map(_.getKey) result shouldBe List("path/with=123/foo", "path/withoutequals/foo") getContent(s3.getObject("urlencoded", "path/with=123/foo")) shouldBe "bar=" getContent(s3.getObject("urlencoded", "path/withoutequals/foo")) shouldBe "bar" diff --git a/src/test/scala/io/findify/s3mock/S3MockTest.scala b/src/test/scala/io/findify/s3mock/S3MockTest.scala index 3dfad5e..8001ceb 100644 --- a/src/test/scala/io/findify/s3mock/S3MockTest.scala +++ b/src/test/scala/io/findify/s3mock/S3MockTest.scala @@ -11,7 +11,7 @@ import com.amazonaws.services.s3.{AmazonS3, AmazonS3Client, AmazonS3ClientBuilde import com.amazonaws.services.s3.model.S3Object import com.amazonaws.services.s3.transfer.{TransferManager, TransferManagerBuilder} import com.typesafe.config.{Config, ConfigFactory} -import io.findify.s3mock.provider.{FileProvider, InMemoryProvider} +import io.findify.s3mock.provider.{FileProvider, InMemoryProvider, InMemoryVersionedProvider} import scala.collection.JavaConverters._ import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers} @@ -43,10 +43,20 @@ trait S3MockTest extends FlatSpec with Matchers with BeforeAndAfterAll { private val inMemoryTransferManager: TransferManager = TransferManagerBuilder.standard().withS3Client(inMemoryS3).build() private val inMemoryBasedAlpakkaClient = S3 - case class Fixture(server: S3Mock, client: AmazonS3, tm: TransferManager, name: String, port: Int, alpakka: S3.type , system: ActorSystem, mat: Materializer) + private val inMemoryVersionedPort = 8003 + private val inMemoryVersionedConfig = configFor("localhost", inMemoryVersionedPort) + private val inMemoryVersionedSystem = ActorSystem.create("testramversioned", inMemoryVersionedConfig) + private val inMemoryVersionedMat = ActorMaterializer()(inMemoryVersionedSystem) + private val inMemoryVersionedS3 = clientFor("localhost", 
inMemoryVersionedPort) + private val inMemoryVersionedServer = new S3Mock(inMemoryVersionedPort, new InMemoryVersionedProvider) + private val inMemoryVersionedTransferManager: TransferManager = TransferManagerBuilder.standard().withS3Client(inMemoryVersionedS3).build() + private val inMemoryVersionedBasedAlpakkaClient = S3 + + case class Fixture(server: S3Mock, client: AmazonS3, tm: TransferManager, name: String, port: Int, alpakka: S3.type , system: ActorSystem, mat: Materializer, versioned: Boolean = false) val fixtures = List( Fixture(fileBasedServer, fileBasedS3, fileBasedTransferManager, "file based S3Mock", fileBasedPort, fileBasedAlpakkaClient, fileSystem, fileMat), - Fixture(inMemoryServer, inMemoryS3, inMemoryTransferManager, "in-memory S3Mock", inMemoryPort, inMemoryBasedAlpakkaClient, inMemorySystem, inMemoryMat) + Fixture(inMemoryServer, inMemoryS3, inMemoryTransferManager, "in-memory S3Mock", inMemoryPort, inMemoryBasedAlpakkaClient, inMemorySystem, inMemoryMat), + Fixture(inMemoryVersionedServer, inMemoryVersionedS3, inMemoryVersionedTransferManager, "in-memory-versioned S3Mock", inMemoryVersionedPort, inMemoryVersionedBasedAlpakkaClient, inMemoryVersionedSystem, inMemoryVersionedMat, versioned = true) ) def behaviour(fixture: => Fixture) : Unit @@ -59,15 +69,19 @@ trait S3MockTest extends FlatSpec with Matchers with BeforeAndAfterAll { if (!File(workDir).exists) File(workDir).createDirectory() fileBasedServer.start inMemoryServer.start + inMemoryVersionedServer.start super.beforeAll } override def afterAll = { super.afterAll inMemoryServer.stop + inMemoryVersionedServer.stop fileBasedServer.stop inMemoryTransferManager.shutdownNow() + inMemoryVersionedTransferManager.shutdownNow() Await.result(fileSystem.terminate(), Duration.Inf) Await.result(inMemorySystem.terminate(), Duration.Inf) + Await.result(inMemoryVersionedSystem.terminate(), Duration.Inf) File(workDir).delete() } def getContent(s3Object: S3Object): String = 
Source.fromInputStream(s3Object.getObjectContent, "UTF-8").mkString diff --git a/src/test/scala/io/findify/s3mock/VersionedObjectTest.scala b/src/test/scala/io/findify/s3mock/VersionedObjectTest.scala new file mode 100644 index 0000000..a24dc38 --- /dev/null +++ b/src/test/scala/io/findify/s3mock/VersionedObjectTest.scala @@ -0,0 +1,62 @@ +package io.findify.s3mock + +import java.io.ByteArrayInputStream + +import com.amazonaws.services.s3.model._ + +import scala.jdk.CollectionConverters._ + +/** + * Created by furkilic on 7/11/20. + */ + +class VersionedObjectTest extends S3MockTest { + val bucketName = "versioned" + + override def behaviour(fixture: => Fixture) = { + val s3 = fixture.client + val versioned = fixture.versioned + it should "put object should generate versionId" in { + s3.createBucket(bucketName).getName shouldBe bucketName + s3.listBuckets().asScala.exists(_.getName == bucketName) shouldBe true + val result = s3.putObject(bucketName, "foo", "bar") + result should not be null + result.getVersionId should not be null + } + it should "MultiPartUpload should generate versionId" in { + s3.createBucket(bucketName) + val init = s3.initiateMultipartUpload(new InitiateMultipartUploadRequest(bucketName, "fooM")) + val p1 = s3.uploadPart(new UploadPartRequest().withBucketName(bucketName).withPartSize(10).withKey("fooM").withPartNumber(1).withUploadId(init.getUploadId).withInputStream(new ByteArrayInputStream("hellohello".getBytes()))) + val p2 = s3.uploadPart(new UploadPartRequest().withBucketName(bucketName).withPartSize(10).withKey("fooM").withPartNumber(2).withUploadId(init.getUploadId).withInputStream(new ByteArrayInputStream("worldworld".getBytes()))) + val result = s3.completeMultipartUpload(new CompleteMultipartUploadRequest(bucketName, "fooM", init.getUploadId, List(p1.getPartETag, p2.getPartETag).asJava)) + result should not be null + result.getVersionId should not be null + } + it should "put/get handle versioning object" in { + 
s3.createBucket(bucketName).getName shouldBe bucketName + s3.listBuckets().asScala.exists(_.getName == bucketName) shouldBe true + val putObjectResult1 = s3.putObject(bucketName, "foo", "bar") + val result1 = getContent(s3.getObject(bucketName, "foo")) + result1 shouldBe "bar" + s3.putObject(bucketName, "foo", "toto") + val result2 = getContent(s3.getObject(bucketName, "foo")) + result2 shouldBe "toto" + val resultVersioned = getContent(s3.getObject(new GetObjectRequest(bucketName, "foo", putObjectResult1.getVersionId))) + resultVersioned shouldBe (if(versioned) "bar" else "toto") + } + it should "multipart/get handle versioning object" in { + s3.createBucket(bucketName) + val init = s3.initiateMultipartUpload(new InitiateMultipartUploadRequest(bucketName, "fooM")) + val p1 = s3.uploadPart(new UploadPartRequest().withBucketName(bucketName).withPartSize(10).withKey("fooM").withPartNumber(1).withUploadId(init.getUploadId).withInputStream(new ByteArrayInputStream("hellohello".getBytes()))) + val result1 = s3.completeMultipartUpload(new CompleteMultipartUploadRequest(bucketName, "fooM", init.getUploadId, List(p1.getPartETag).asJava)) + getContent(s3.getObject(bucketName, "fooM")) shouldBe "hellohello" + val init1 = s3.initiateMultipartUpload(new InitiateMultipartUploadRequest(bucketName, "fooM")) + val p2 = s3.uploadPart(new UploadPartRequest().withBucketName(bucketName).withPartSize(10).withKey("fooM").withPartNumber(2).withUploadId(init1.getUploadId).withInputStream(new ByteArrayInputStream("worldworld".getBytes()))) + s3.completeMultipartUpload(new CompleteMultipartUploadRequest(bucketName, "fooM", init1.getUploadId, List(p2.getPartETag).asJava)) + getContent(s3.getObject(bucketName, "fooM")) shouldBe "worldworld" + getContent(s3.getObject(new GetObjectRequest(bucketName, "fooM", result1.getVersionId))) shouldBe (if(versioned) "hellohello" else "worldworld") + } + } + +} +