supportedFileAttributeViews() {
+ return ImmutableSet.of("basic", "posix");
+ }
+
+ /**
+ * Build an S3Path from segments; the first segment starting with '/'
+ * denotes the bucket.
+ */
+ @Override
+ public S3Path getPath(String first, String... more) {
+ // Both branches of the original delegated to the same varargs
+ // constructor (a zero-length 'more' is equivalent to omitting it),
+ // so a single call suffices.
+ return new S3Path(this, first, more);
+ }
+
+ /** Path matching is not supported by this S3 filesystem implementation. */
+ @Override
+ public PathMatcher getPathMatcher(String syntaxAndPattern) {
+ throw new UnsupportedOperationException("getPathMatcher is not supported by the S3 filesystem");
+ }
+
+ /** Principal lookup is not supported by this S3 filesystem implementation. */
+ @Override
+ public UserPrincipalLookupService getUserPrincipalLookupService() {
+ throw new UnsupportedOperationException("getUserPrincipalLookupService is not supported by the S3 filesystem");
+ }
+
+ /** Watch services are not supported by this S3 filesystem implementation. */
+ @Override
+ public WatchService newWatchService() throws IOException {
+ throw new UnsupportedOperationException("newWatchService is not supported by the S3 filesystem");
+ }
+
+ /**
+ * @return the AmazonS3 client this filesystem uses to talk to S3
+ */
+ public AmazonS3 getClient() {
+ return client;
+ }
+
+ /**
+ * Get the endpoint associated with this filesystem.
+ *
+ * @return the endpoint host, e.g. s3.amazonaws.com
+ * @see <a href="http://docs.aws.amazon.com/general/latest/gr/rande.html">AWS regions and endpoints</a>
+ */
+ public String getEndpoint() {
+ return endpoint;
+ }
+
+ /**
+ * Split an S3 object key into its path components.
+ *
+ * @param keyParts key such as "dir/subdir/file"
+ * @return the components split on {@link S3Path#PATH_SEPARATOR}
+ */
+ public String[] key2Parts(String keyParts) {
+ // String.split already returns a fresh array; the original copied it
+ // element by element into a second, identical array for no benefit.
+ return keyParts.split(S3Path.PATH_SEPARATOR);
+ }
+
+ /**
+ * Hash code derived from endpoint and key, consistent with
+ * {@link #equals(Object)} which compares the same two fields.
+ */
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + ((endpoint == null) ? 0 : endpoint.hashCode());
+ result = prime * result + ((key == null) ? 0 : key.hashCode());
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+ if (obj == null)
+ return false;
+ if (!(obj instanceof S3FileSystem))
+ return false;
+ S3FileSystem other = (S3FileSystem) obj;
+ if (endpoint == null) {
+ if (other.endpoint != null)
+ return false;
+ } else if (!endpoint.equals(other.endpoint))
+ return false;
+ if (key == null) {
+ if (other.key != null)
+ return false;
+ } else if (!key.equals(other.key))
+ return false;
+ return true;
+ }
+
+ /**
+ * Orders filesystems by their key (access-key@endpoint).
+ *
+ * NOTE(review): this ordering uses only 'key' while equals() compares
+ * endpoint and key, so compareTo may not be consistent with equals; it
+ * also throws NullPointerException when key is null, which equals()
+ * tolerates — confirm against callers.
+ */
+ @Override
+ public int compareTo(S3FileSystem o) {
+ return key.compareTo(o.getKey());
+ }
+
+ // NOTE(review): appears to be the attribute-cache lifetime handed to
+ // Cache.isInTime by the provider — confirm units.
+ public int getCache() {
+ return cache;
+ }
+}
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileSystemConfigurationException.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileSystemConfigurationException.java
new file mode 100644
index 00000000000..9b7e464df74
--- /dev/null
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileSystemConfigurationException.java
@@ -0,0 +1,9 @@
+package com.scalableminds.webknossos.datastore.s3fs;
+
+ /**
+ * Signals an invalid or inconsistent s3fs configuration (bad properties,
+ * unusable factory class, etc.).
+ */
+ public class S3FileSystemConfigurationException extends RuntimeException {
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * @param message description of the configuration problem
+ */
+ public S3FileSystemConfigurationException(String message) {
+ super(message);
+ }
+
+ /**
+ * @param message description of the configuration problem
+ * @param cause underlying cause, preserved for diagnostics
+ */
+ public S3FileSystemConfigurationException(String message, Throwable cause) {
+ super(message, cause);
+ }
+ }
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileSystemProvider.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileSystemProvider.java
new file mode 100644
index 00000000000..40591c9aeb9
--- /dev/null
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileSystemProvider.java
@@ -0,0 +1,636 @@
+package com.scalableminds.webknossos.datastore.s3fs;
+
+import com.amazonaws.services.s3.AmazonS3;
+import com.amazonaws.services.s3.internal.Constants;
+import com.amazonaws.services.s3.model.AmazonS3Exception;
+import com.amazonaws.services.s3.model.Bucket;
+import com.amazonaws.services.s3.model.ObjectMetadata;
+import com.amazonaws.services.s3.model.S3Object;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Sets;
+import com.scalableminds.webknossos.datastore.s3fs.attribute.S3BasicFileAttributes;
+import com.scalableminds.webknossos.datastore.s3fs.attribute.S3PosixFileAttributes;
+import com.scalableminds.webknossos.datastore.s3fs.util.AttributesUtils;
+import com.scalableminds.webknossos.datastore.s3fs.util.Cache;
+import com.scalableminds.webknossos.datastore.s3fs.util.S3Utils;
+import org.apache.commons.lang3.NotImplementedException;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.nio.channels.FileChannel;
+import java.nio.channels.SeekableByteChannel;
+import java.nio.file.*;
+import java.nio.file.attribute.*;
+import java.nio.file.spi.FileSystemProvider;
+import java.util.*;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import static com.google.common.collect.Sets.difference;
+import static java.lang.String.format;
+
+/**
+ * Spec:
+ *
+ * URI: s3://[endpoint]/{bucket}/{key} If endpoint is missing, it's assumed to
+ * be the default S3 endpoint (s3.amazonaws.com)
+ *
+ *
+ * FileSystem roots: /{bucket}/
+ *
+ *
+ * Treatment of S3 objects: - If a key ends in "/" it's considered a directory
+ * *and* a regular file. Otherwise, it's just a regular file. - It is legal for
+ * a key "xyz" and "xyz/" to exist at the same time. The latter is treated as a
+ * directory. - If a file "a/b/c" exists but there's no "a" or "a/b/", these are
+ * considered "implicit" directories. They can be listed, traversed and deleted.
+ *
+ *
+ * Deviations from FileSystem provider API: - Deleting a file or directory
+ * always succeeds, regardless of whether the file/directory existed before the
+ * operation was issued i.e. Files.delete() and Files.deleteIfExists() are
+ * equivalent.
+ *
+ *
+ * Future versions of this provider might allow for a strict mode that mimics
+ * the semantics of the FileSystem provider API on a best effort basis, at an
+ * increased processing cost.
+ *
+ */
+public class S3FileSystemProvider extends FileSystemProvider {
+
+ public static final String CHARSET_KEY = "s3fs_charset";
+ public static final String AMAZON_S3_FACTORY_CLASS = "s3fs_amazon_s3_factory";
+
+ // Registry shared across provider instances: one S3FileSystem per
+ // filesystem key (access-key@endpoint).
+ private static final ConcurrentMap fileSystems = new ConcurrentHashMap<>();
+ // Configuration keys that may be overloaded from the env map, JVM system
+ // properties or environment variables, in that order of precedence.
+ private static final List PROPS_TO_OVERLOAD = Arrays.asList(AmazonS3Factory.ACCESS_KEY, AmazonS3Factory.SECRET_KEY, AmazonS3Factory.REQUEST_METRIC_COLLECTOR_CLASS, AmazonS3Factory.CONNECTION_TIMEOUT, AmazonS3Factory.MAX_CONNECTIONS, AmazonS3Factory.MAX_ERROR_RETRY, AmazonS3Factory.PROTOCOL, AmazonS3Factory.PROXY_DOMAIN,
+ AmazonS3Factory.PROXY_HOST, AmazonS3Factory.PROXY_PASSWORD, AmazonS3Factory.PROXY_PORT, AmazonS3Factory.PROXY_USERNAME, AmazonS3Factory.PROXY_WORKSTATION, AmazonS3Factory.SOCKET_SEND_BUFFER_SIZE_HINT, AmazonS3Factory.SOCKET_RECEIVE_BUFFER_SIZE_HINT, AmazonS3Factory.SOCKET_TIMEOUT,
+ AmazonS3Factory.USER_AGENT, AMAZON_S3_FACTORY_CLASS, AmazonS3Factory.SIGNER_OVERRIDE, AmazonS3Factory.PATH_STYLE_ACCESS);
+
+ private S3Utils s3Utils = new S3Utils();
+ private Cache cache = new Cache();
+
+ /** URI scheme handled by this provider. */
+ @Override
+ public String getScheme() {
+ return "s3";
+ }
+
+ /**
+ * Create and register a new S3 filesystem for the given URI.
+ *
+ * @throws FileSystemAlreadyExistsException if a filesystem for the same
+ * key (access-key@endpoint) is already registered
+ */
+ @Override
+ public FileSystem newFileSystem(URI uri, Map env) {
+ validateUri(uri);
+ // get properties from the env, the properties file or the system
+ Properties props = getProperties(uri, env);
+ validateProperties(props);
+ // fast-path rejection using the key derived from the raw URI
+ String key = getFileSystemKey(uri, props);
+ if (fileSystems.containsKey(key)) {
+ throw new FileSystemAlreadyExistsException("File system " + uri.getScheme() + ':' + key + " already exists");
+ }
+ // Register atomically: the original containsKey/put pair was a
+ // check-then-act race that let a concurrent caller silently replace
+ // another thread's filesystem. putIfAbsent closes that window.
+ S3FileSystem fileSystem = createFileSystem(uri, props);
+ if (fileSystems.putIfAbsent(fileSystem.getKey(), fileSystem) != null) {
+ throw new FileSystemAlreadyExistsException("File system " + uri.getScheme() + ':' + fileSystem.getKey() + " already exists");
+ }
+ return fileSystem;
+ }
+
+ // Credentials must come as a pair: either both access key and secret key
+ // are configured, or neither (anonymous / default-chain access).
+ private void validateProperties(Properties props) {
+ Preconditions.checkArgument(
+ (props.getProperty(AmazonS3Factory.ACCESS_KEY) == null && props.getProperty(AmazonS3Factory.SECRET_KEY) == null)
+ || (props.getProperty(AmazonS3Factory.ACCESS_KEY) != null && props.getProperty(AmazonS3Factory.SECRET_KEY) != null), "%s and %s should both be provided or should both be omitted",
+ AmazonS3Factory.ACCESS_KEY, AmazonS3Factory.SECRET_KEY);
+ }
+
+ /**
+ * Build the effective configuration: amazon.properties from the
+ * classpath, overlaid with env/system settings, overlaid with
+ * credentials embedded in the URI user-info ("access:secret").
+ */
+ private Properties getProperties(URI uri, Map env) {
+ Properties props = loadAmazonProperties();
+ addEnvProperties(props, env);
+ // access key and secret key in the URI override everything else
+ String userInfo = uri.getUserInfo();
+ if (userInfo != null) {
+ // split with limit 2 so a secret key containing ':' is kept intact;
+ // the original unlimited split truncated such secrets.
+ String[] keys = userInfo.split(":", 2);
+ props.setProperty(AmazonS3Factory.ACCESS_KEY, keys[0]);
+ if (keys.length > 1) {
+ props.setProperty(AmazonS3Factory.SECRET_KEY, keys[1]);
+ }
+ }
+ return props;
+ }
+
+ // Convenience overload: derive the key using properties resolved from
+ // the URI alone (no caller-supplied env map).
+ private String getFileSystemKey(URI uri) {
+ return getFileSystemKey(uri, getProperties(uri, null));
+ }
+
+ /**
+ * Get the filesystem key in the form access-key@endpoint,
+ * e.g. access-key@s3.amazonaws.com.
+ * If the URI host is empty, s3.amazonaws.com is used as the host.
+ *
+ * @param uri URI carrying the endpoint
+ * @param props properties possibly carrying the access key
+ * @return the key string
+ */
+ protected String getFileSystemKey(URI uri, Properties props) {
+ // We deliberately avoid uri.getUserInfo()/uri.getHost() here: access
+ // and secret keys may contain characters that make URI parsing return
+ // wrong or null components, so the string is parsed manually.
+ String uriString = uri.toString().replace("s3://", "");
+ String authority = null;
+ int authoritySeparator = uriString.indexOf("@");
+
+ if (authoritySeparator > 0) {
+ authority = uriString.substring(0, authoritySeparator);
+ }
+
+ if (authority != null) {
+ // everything after '@' up to the first '/' is the endpoint host
+ String host = uriString.substring(uriString.indexOf("@") + 1, uriString.length());
+ int lastPath = host.indexOf("/");
+ if (lastPath > -1) {
+ host = host.substring(0, lastPath);
+ }
+ if (host.length() == 0) {
+ host = Constants.S3_HOSTNAME;
+ }
+ return authority + "@" + host;
+ } else {
+ // no user-info in the URI: fall back to the configured access key
+ String accessKey = (String) props.get(AmazonS3Factory.ACCESS_KEY);
+ return (accessKey != null ? accessKey + "@" : "") +
+ (uri.getHost() != null ? uri.getHost() : Constants.S3_HOSTNAME);
+ }
+ }
+
+ // Reject null URIs and URIs whose scheme is not "s3".
+ protected void validateUri(URI uri) {
+ Preconditions.checkNotNull(uri, "uri is null");
+ Preconditions.checkArgument(uri.getScheme().equals(getScheme()), "uri scheme must be 's3': '%s'", uri);
+ }
+
+ /**
+ * Merge the env map into props: the well-known keys go through the
+ * overload chain (env map, then system properties, then system env);
+ * every other entry is copied over verbatim.
+ */
+ protected void addEnvProperties(Properties props, Map env) {
+ if (env == null)
+ env = new HashMap<>();
+ // well-known keys can be overloaded from several sources
+ for (String key : PROPS_TO_OVERLOAD)
+ overloadProperty(props, env, key);
+ // pass through everything the overload chain did not already consume
+ for (String key : env.keySet()) {
+ if (!PROPS_TO_OVERLOAD.contains(key))
+ props.put(key, env.get(key));
+ }
+ }
+
+ /**
+ * Try to override the property with, in order of precedence:
+ *
+ * - the env map, or if not set there:
+ * - the JVM system property, or if not set there:
+ * - the system environment variable
+ *
+ * @param props Properties to override
+ * @param env Map consulted first
+ * @param key String the property key
+ */
+ private void overloadProperty(Properties props, Map env, String key) {
+ boolean overloaded = overloadPropertiesWithEnv(props, env, key);
+
+ if (!overloaded) {
+ overloaded = overloadPropertiesWithSystemProps(props, key);
+ }
+
+ if (!overloaded) {
+ overloadPropertiesWithSystemEnv(props, key);
+ }
+ }
+
+ /**
+ * @return true if the key are overloaded by the map parameter
+ */
+ protected boolean overloadPropertiesWithEnv(Properties props, Map env, String key) {
+ if (env.get(key) != null && env.get(key) instanceof String) {
+ props.setProperty(key, (String) env.get(key));
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * @return true if the key are overloaded by a system property
+ */
+ public boolean overloadPropertiesWithSystemProps(Properties props, String key) {
+ if (System.getProperty(key) != null) {
+ props.setProperty(key, System.getProperty(key));
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * The system envs have preference over the properties files.
+ * So we overload it
+ * @param props Properties
+ * @param key String
+ * @return true if the key are overloaded by a system property
+ */
+ public boolean overloadPropertiesWithSystemEnv(Properties props, String key) {
+ if (systemGetEnv(key) != null) {
+ props.setProperty(key, systemGetEnv(key));
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * Get the system env with the key param
+ * @param key String
+ * @return String or null
+ */
+ public String systemGetEnv(String key) {
+ return System.getenv(key);
+ }
+
+ /**
+ * Get an existing filesystem based on the combination of URI and env
+ * settings, or create a new one otherwise.
+ *
+ * NOTE(review): the containsKey/get/newFileSystem sequence is not atomic;
+ * two concurrent callers may race into newFileSystem() and one will get
+ * FileSystemAlreadyExistsException — confirm whether callers tolerate this.
+ *
+ * @param uri URI of the existing or to-be-created filesystem.
+ * @param env environment settings.
+ * @return new or existing filesystem.
+ */
+ public FileSystem getFileSystem(URI uri, Map env) {
+ validateUri(uri);
+ Properties props = getProperties(uri, env);
+ String key = this.getFileSystemKey(uri, props); // s3fs_access_key is part of the key here.
+ if (fileSystems.containsKey(key))
+ return fileSystems.get(key);
+ return newFileSystem(uri, env);
+ }
+
+ /**
+ * Look up a previously created filesystem for this URI.
+ *
+ * @throws FileSystemNotFoundException when none was registered yet
+ */
+ @Override
+ public S3FileSystem getFileSystem(URI uri) {
+ validateUri(uri);
+ String key = this.getFileSystemKey(uri);
+ // single map lookup instead of containsKey followed by get
+ S3FileSystem fileSystem = (S3FileSystem) fileSystems.get(key);
+ if (fileSystem == null)
+ throw new FileSystemNotFoundException("S3 filesystem not yet created. Use newFileSystem() instead");
+ return fileSystem;
+ }
+
+ // Downcast a generic Path to S3Path, failing fast on foreign Path types.
+ private S3Path toS3Path(Path path) {
+ Preconditions.checkArgument(path instanceof S3Path, "path must be an instance of %s", S3Path.class.getName());
+ return (S3Path) path;
+ }
+
+ /**
+ * Deviation from spec: throws FileSystemNotFoundException if the
+ * FileSystem hasn't yet been initialized. Call newFileSystem() first.
+ * Needs credentials. Maybe set credentials after? how?
+ * TODO: we can create a new one if the credentials are present by:
+ * s3://access-key:secret-key@endpoint.com/
+ */
+ @Override
+ public Path getPath(URI uri) {
+ FileSystem fileSystem = getFileSystem(uri);
+ /**
+ * TODO: set as a list. one s3FileSystem by region
+ */
+ return fileSystem.getPath(uri.getPath());
+ }
+
+ /**
+ * Stream over the immediate children of the directory, backed by
+ * {@link S3Iterator}. The stream itself holds no resources, so close()
+ * is a no-op; listing requests are issued lazily by the iterator.
+ */
+ @Override
+ public DirectoryStream newDirectoryStream(Path dir, DirectoryStream.Filter super Path> filter) throws IOException {
+ final S3Path s3Path = toS3Path(dir);
+ return new DirectoryStream() {
+ @Override
+ public void close() throws IOException {
+ // nothing to do here
+ }
+
+ @Override
+ public Iterator iterator() {
+ return new S3Iterator(s3Path);
+ }
+ };
+ }
+
+ /**
+ * Open the object's content for reading.
+ *
+ * @throws NoSuchFileException when S3 reports 404 for the key
+ * @throws IOException for any other S3 failure, or when the key has no
+ * readable content (treated as a directory)
+ */
+ @Override
+ public InputStream newInputStream(Path path, OpenOption... options) throws IOException {
+ S3Path s3Path = toS3Path(path);
+ String key = s3Path.getKey();
+
+ Preconditions.checkArgument(options.length == 0, "OpenOptions not yet supported: %s", ImmutableList.copyOf(options)); // TODO
+ Preconditions.checkArgument(!key.equals(""), "cannot create InputStream for root directory: %s", path);
+
+ try {
+ S3Object object = s3Path.getFileSystem().getClient().getObject(s3Path.getFileStore().name(), key);
+ InputStream res = object.getObjectContent();
+
+ // NOTE(review): when content is null the S3Object is not closed
+ // before throwing — confirm whether the SDK requires close() here.
+ if (res == null)
+ throw new IOException(String.format("The specified path is a directory: %s", path));
+
+ return res;
+ } catch (AmazonS3Exception e) {
+ if (e.getStatusCode() == 404)
+ throw new NoSuchFileException(path.toString());
+ // otherwise throw a generic IO exception
+ throw new IOException(String.format("Cannot access file: %s", path), e);
+ }
+ }
+
+ /**
+ * Open a seekable channel: a read-only channel when no option or READ is
+ * requested, otherwise a writable S3 channel. Mixed read+write on one
+ * channel is rejected.
+ */
+ @Override
+ public SeekableByteChannel newByteChannel(Path path, Set extends OpenOption> options, FileAttribute>... attrs) throws IOException {
+ S3Path s3Path = toS3Path(path);
+ if (options.isEmpty() || options.contains(StandardOpenOption.READ)) {
+ if (options.contains(StandardOpenOption.WRITE))
+ throw new UnsupportedOperationException(
+ "Can't read and write one on channel"
+ );
+ return new S3ReadOnlySeekableByteChannel(s3Path, options);
+ } else {
+ return new S3SeekableByteChannel(s3Path, options);
+ }
+ }
+
+ /** Open a FileChannel view over the object; options as for newByteChannel. */
+ @Override
+ public FileChannel newFileChannel(Path path, Set extends OpenOption> options, FileAttribute>... attrs) throws IOException {
+ S3Path s3Path = toS3Path(path);
+ return new S3FileChannel(s3Path, options);
+ }
+
+ /**
+ * Deviations from spec: does not perform an atomic check-and-create.
+ * Since a directory is just an S3 object, all directories in the
+ * hierarchy are created, or it already existed.
+ *
+ * @throws FileAlreadyExistsException when the target key already exists
+ */
+ @Override
+ public void createDirectory(Path dir, FileAttribute>... attrs) throws IOException {
+ S3Path s3Path = toS3Path(dir);
+ Preconditions.checkArgument(attrs.length == 0, "attrs not yet supported: %s", ImmutableList.copyOf(attrs)); // TODO
+ if (exists(s3Path))
+ throw new FileAlreadyExistsException(format("target already exists: %s", s3Path));
+ // create the bucket if necessary
+ Bucket bucket = s3Path.getFileStore().getBucket();
+ String bucketName = s3Path.getFileStore().name();
+ if (bucket == null) {
+ s3Path.getFileSystem().getClient().createBucket(bucketName);
+ }
+ // a directory is represented by a zero-byte object whose key ends in '/'
+ ObjectMetadata metadata = new ObjectMetadata();
+ metadata.setContentLength(0);
+ String directoryKey = s3Path.getKey().endsWith("/") ? s3Path.getKey() : s3Path.getKey() + "/";
+ s3Path.getFileSystem().getClient().putObject(bucketName, directoryKey, new ByteArrayInputStream(new byte[0]), metadata);
+ }
+
+ /**
+ * Delete the object at the given path. Both the bare key and the key
+ * with a trailing '/' are removed, since a directory may exist in
+ * either form.
+ *
+ * @throws NoSuchFileException when the path does not exist
+ * @throws DirectoryNotEmptyException when the path is a non-empty directory
+ */
+ @Override
+ public void delete(Path path) throws IOException {
+ S3Path s3Path = toS3Path(path);
+ // BUGFIX: the messages previously interpolated 'this' (the provider
+ // object), not the path being deleted.
+ if (Files.notExists(s3Path))
+ throw new NoSuchFileException("the path: " + s3Path + " not exists");
+ if (Files.isDirectory(s3Path)) {
+ // BUGFIX: close the directory stream instead of leaking it
+ try (DirectoryStream stream = Files.newDirectoryStream(s3Path)) {
+ if (stream.iterator().hasNext())
+ throw new DirectoryNotEmptyException("the path: " + s3Path + " is a directory and is not empty");
+ }
+ }
+
+ String key = s3Path.getKey();
+ String bucketName = s3Path.getFileStore().name();
+ s3Path.getFileSystem().getClient().deleteObject(bucketName, key);
+ // we delete both variants (sometimes the key exists with '/' and sometimes not)
+ s3Path.getFileSystem().getClient().deleteObject(bucketName, key + "/");
+ }
+
+ /**
+ * Server-side copy of a single object. Copying directories is not yet
+ * supported; REPLACE_EXISTING is the only accepted CopyOption.
+ *
+ * @throws FileAlreadyExistsException when the target exists and
+ * REPLACE_EXISTING was not requested
+ */
+ @Override
+ public void copy(Path source, Path target, CopyOption... options) throws IOException {
+ if (isSameFile(source, target))
+ return;
+
+ S3Path s3Source = toS3Path(source);
+ S3Path s3Target = toS3Path(target);
+ // TODO: implement support for copying directories
+
+ Preconditions.checkArgument(!Files.isDirectory(source), "copying directories is not yet supported: %s", source);
+ Preconditions.checkArgument(!Files.isDirectory(target), "copying directories is not yet supported: %s", target);
+
+ ImmutableSet actualOptions = ImmutableSet.copyOf(options);
+ verifySupportedOptions(EnumSet.of(StandardCopyOption.REPLACE_EXISTING), actualOptions);
+
+ if (exists(s3Target) && !actualOptions.contains(StandardCopyOption.REPLACE_EXISTING)) {
+ throw new FileAlreadyExistsException(format("target already exists: %s", target));
+ }
+
+ String bucketNameOrigin = s3Source.getFileStore().name();
+ String keySource = s3Source.getKey();
+ String bucketNameTarget = s3Target.getFileStore().name();
+ String keyTarget = s3Target.getKey();
+ s3Source.getFileSystem()
+ .getClient().copyObject(
+ bucketNameOrigin,
+ keySource,
+ bucketNameTarget,
+ keyTarget);
+ }
+
+ /**
+ * Move implemented as copy + delete; this is NOT atomic, so ATOMIC_MOVE
+ * is explicitly rejected.
+ */
+ @Override
+ public void move(Path source, Path target, CopyOption... options) throws IOException {
+ if (options != null && Arrays.asList(options).contains(StandardCopyOption.ATOMIC_MOVE))
+ throw new AtomicMoveNotSupportedException(source.toString(), target.toString(), "Atomic not supported");
+ copy(source, target, options);
+ delete(source);
+ }
+
+ // Two paths denote the same file only when both are absolute and equal;
+ // relative paths are never considered the same file.
+ @Override
+ public boolean isSameFile(Path path1, Path path2) throws IOException {
+ return path1.isAbsolute() && path2.isAbsolute() && path1.equals(path2);
+ }
+
+ // S3 has no notion of hidden files.
+ @Override
+ public boolean isHidden(Path path) throws IOException {
+ return false;
+ }
+
+ @Override
+ public FileStore getFileStore(Path path) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ /**
+ * Check access to the path: with no modes, only existence is verified;
+ * otherwise the object's ACL is consulted for the requested modes.
+ *
+ * @throws NoSuchFileException when the path does not exist
+ */
+ @Override
+ public void checkAccess(Path path, AccessMode... modes) throws IOException {
+ S3Path s3Path = toS3Path(path);
+ Preconditions.checkArgument(s3Path.isAbsolute(), "path must be absolute: %s", s3Path);
+ if (modes.length == 0) {
+ if (exists(s3Path))
+ return;
+ // BUGFIX: the original threw NoSuchFileException(toString()), which
+ // reported the provider object instead of the missing path.
+ throw new NoSuchFileException(s3Path.toString());
+ }
+
+ String key = s3Utils.getS3ObjectSummary(s3Path).getKey();
+ S3AccessControlList accessControlList =
+ new S3AccessControlList(s3Path.getFileStore().name(), key, s3Path.getFileSystem().getClient().getObjectAcl(s3Path.getFileStore().name(), key), s3Path.getFileStore().getOwner());
+
+ accessControlList.checkAccess(modes);
+ }
+
+
+ // Attribute views are not implemented; read attributes via
+ // readAttributes(...) instead. NOTE(review): this throws commons-lang
+ // NotImplementedException while sibling methods throw
+ // UnsupportedOperationException — confirm whether callers rely on the type.
+ @Override
+ public V getFileAttributeView(Path path, Class type, LinkOption... options) {
+ throw new NotImplementedException();
+ }
+
+ /**
+ * Read basic or posix attributes for the path, reusing the attributes
+ * cached on the S3Path when still within the configured cache lifetime.
+ *
+ * NOTE(review): after a cache hit the cached attributes are cleared via
+ * setFileAttributes(null), so a second read re-fetches — presumably a
+ * deliberate one-shot cache; confirm.
+ */
+ @Override
+ public A readAttributes(Path path, Class type, LinkOption... options) throws IOException {
+ S3Path s3Path = toS3Path(path);
+ if (type == BasicFileAttributes.class) {
+ if (cache.isInTime(s3Path.getFileSystem().getCache(), s3Path.getFileAttributes())) {
+ A result = type.cast(s3Path.getFileAttributes());
+ s3Path.setFileAttributes(null);
+ return result;
+ } else {
+ // cache miss or expired: fetch fresh attributes and re-cache them
+ S3BasicFileAttributes attrs = s3Utils.getS3FileAttributes(s3Path);
+ s3Path.setFileAttributes(attrs);
+ return type.cast(attrs);
+ }
+ } else if (type == PosixFileAttributes.class) {
+ // posix requires the cached value to actually be posix attributes
+ if (s3Path.getFileAttributes() instanceof PosixFileAttributes &&
+ cache.isInTime(s3Path.getFileSystem().getCache(), s3Path.getFileAttributes())) {
+ A result = type.cast(s3Path.getFileAttributes());
+ s3Path.setFileAttributes(null);
+ return result;
+ }
+
+ S3PosixFileAttributes attrs = s3Utils.getS3PosixFileAttributes(s3Path);
+ s3Path.setFileAttributes(attrs);
+ return type.cast(attrs);
+ }
+
+ throw new UnsupportedOperationException(format("only %s or %s supported", BasicFileAttributes.class, PosixFileAttributes.class));
+ }
+
+ /**
+ * Read attributes by name, e.g. "*", "basic:*", "posix:*" or a
+ * comma-separated list; only the basic and posix views are supported.
+ */
+ @Override
+ public Map readAttributes(Path path, String attributes, LinkOption... options) throws IOException {
+ if (attributes == null) {
+ throw new IllegalArgumentException("Attributes null");
+ }
+
+ // any view prefix other than basic:/posix: is rejected
+ if (attributes.contains(":") && !attributes.contains("basic:") && !attributes.contains("posix:")) {
+ throw new UnsupportedOperationException(format("attributes %s are not supported, only basic / posix are supported", attributes));
+ }
+
+ if (attributes.equals("*") || attributes.equals("basic:*")) {
+ BasicFileAttributes attr = readAttributes(path, BasicFileAttributes.class, options);
+ return AttributesUtils.fileAttributeToMap(attr);
+ } else if (attributes.equals("posix:*")) {
+ PosixFileAttributes attr = readAttributes(path, PosixFileAttributes.class, options);
+ return AttributesUtils.fileAttributeToMap(attr);
+ } else {
+ // a specific attribute list: read the full view, then filter
+ String[] filters = new String[]{attributes};
+ if (attributes.contains(",")) {
+ filters = attributes.split(",");
+ }
+ Class extends BasicFileAttributes> filter = BasicFileAttributes.class;
+ if (attributes.startsWith("posix:")) {
+ filter = PosixFileAttributes.class;
+ }
+ return AttributesUtils.fileAttributeToMap(readAttributes(path, filter, options), filters);
+ }
+ }
+
+ // S3 objects are immutable from this provider's perspective; attributes
+ // cannot be set.
+ @Override
+ public void setAttribute(Path path, String attribute, Object value, LinkOption... options) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ // ~~
+
+ /**
+ * Create the filesystem for the given URI, normalizing shortcut hosts
+ * (e.g. "bucket" -> "bucket.s3.amazonaws.com") first.
+ *
+ * @param uri URI
+ * @param props Properties
+ * @return S3FileSystem never null
+ */
+ public S3FileSystem createFileSystem(URI uri, Properties props) {
+ URI uriWithNormalizedHost = resolveShortcutHost(uri);
+ return new S3FileSystem(this, getFileSystemKey(uriWithNormalizedHost, props), getAmazonS3(uriWithNormalizedHost, props), uriWithNormalizedHost.getHost());
+ }
+
+ /**
+ * Expand a bare shortcut host (no dots, e.g. "my-endpoint") into a full
+ * amazon hostname ("my-endpoint.s3.amazonaws.com"); dotted hostnames are
+ * kept as-is.
+ */
+ private URI resolveShortcutHost(URI uri) {
+ String host = uri.getHost();
+ // BUGFIX: getHost() can be null (e.g. authority that URI fails to
+ // parse); the original dereferenced it and threw NullPointerException.
+ if (host == null)
+ return uri;
+ if (!host.contains(".")) {
+ host += ".s3.amazonaws.com";
+ }
+ try {
+ return new URI(uri.getScheme(), uri.getUserInfo(), host, uri.getPort(), uri.getPath(), uri.getQuery(), uri.getFragment());
+ } catch (URISyntaxException e) {
+ // fall back to the original URI when reassembly fails
+ return uri;
+ }
+ }
+
+ // Build the S3 client from the URI and resolved configuration;
+ // overridable so tests can inject a stub client.
+ protected AmazonS3 getAmazonS3(URI uri, Properties props) {
+ return new AmazonS3Factory().getAmazonS3Client(uri, props);
+ }
+
+ /**
+ * Find /amazon.properties on the classpath.
+ *
+ * @return Properties loaded from amazon.properties, or empty when absent
+ */
+ public Properties loadAmazonProperties() {
+ Properties props = new Properties();
+ // http://www.javaworld.com/javaworld/javaqa/2003-06/01-qa-0606-load.html
+ // http://www.javaworld.com/javaqa/2003-08/01-qa-0808-property.html
+ try (InputStream in = Thread.currentThread().getContextClassLoader().getResourceAsStream("amazon.properties")) {
+ if (in != null)
+ props.load(in);
+ } catch (IOException e) {
+ // If amazon.properties can't be loaded that's ok: defaults apply.
+ }
+ return props;
+ }
+
+ // ~~~
+
+ // Fail fast when the caller passed an option this provider cannot honor.
+ private void verifySupportedOptions(Set extends T> allowedOptions, Set extends T> actualOptions) {
+ Sets.SetView extends T> unsupported = difference(actualOptions, allowedOptions);
+ Preconditions.checkArgument(unsupported.isEmpty(), "the following options are not supported: %s", unsupported);
+ }
+
+ /**
+ * Check whether the given path exists in S3.
+ *
+ * @param path S3Path
+ * @return true if an object summary can be fetched for the path
+ */
+ boolean exists(S3Path path) {
+ // The parameter is already an S3Path; the original redundantly
+ // re-converted it through toS3Path().
+ try {
+ s3Utils.getS3ObjectSummary(path);
+ return true;
+ } catch (NoSuchFileException e) {
+ // best-effort probe: a missing object simply means "does not exist"
+ return false;
+ }
+ }
+
+ /**
+ * Deregister the filesystem so a subsequent newFileSystem() can
+ * recreate it.
+ */
+ public void close(S3FileSystem fileSystem) {
+ // ConcurrentMap.remove of an absent key is a harmless no-op, so the
+ // original containsKey pre-check only introduced a check-then-act race.
+ if (fileSystem.getKey() != null)
+ fileSystems.remove(fileSystem.getKey());
+ }
+
+ /**
+ * @return true while the filesystem is still registered with this provider
+ */
+ public boolean isOpen(S3FileSystem s3FileSystem) {
+ return fileSystems.containsKey(s3FileSystem.getKey());
+ }
+
+ /**
+ * Visible for testing only: exposes the shared filesystem registry.
+ */
+
+ protected static ConcurrentMap getFilesystems() {
+ return fileSystems;
+ }
+
+ // Attribute cache used by readAttributes(); replaceable for testing.
+ public Cache getCache() {
+ return cache;
+ }
+
+ public void setCache(Cache cache) {
+ this.cache = cache;
+ }
+}
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3Iterator.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3Iterator.java
new file mode 100644
index 00000000000..55380755971
--- /dev/null
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3Iterator.java
@@ -0,0 +1,192 @@
+package com.scalableminds.webknossos.datastore.s3fs;
+
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.NoSuchElementException;
+import java.util.Set;
+
+import com.amazonaws.services.s3.model.ListObjectsRequest;
+import com.amazonaws.services.s3.model.ObjectListing;
+import com.amazonaws.services.s3.model.S3ObjectSummary;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+import com.scalableminds.webknossos.datastore.s3fs.util.S3Utils;
+
+/**
+ * S3 iterator over folders at first level.
+ * Future versions of this class should be return the elements
+ * in a incremental way when the #next() method is called.
+ */
+public class S3Iterator implements Iterator {
+ private S3FileSystem fileSystem;
+ private S3FileStore fileStore;
+ // listing prefix; ends with '/' in non-incremental mode
+ private String key;
+ private List items = Lists.newArrayList();
+ // virtual (implicit) directories already emitted, to avoid duplicates
+ private Set addedVirtualDirectories = Sets.newHashSet();
+ private ObjectListing current;
+ private int cursor; // index of next element to return
+ private int size;
+ private boolean incremental;
+
+ private S3Utils s3Utils = new S3Utils();
+
+ public S3Iterator(S3Path path) {
+ this(path, false);
+ }
+
+ // Non-incremental listings need the key terminated with '/' so S3 treats
+ // it as a directory prefix.
+ public S3Iterator(S3Path path, boolean incremental) {
+ this(path.getFileStore(), path.getKey() + (!incremental && !path.getKey().isEmpty() && !path.getKey().endsWith("/") ? "/" : ""), incremental);
+ }
+
+ // Issues the first listing request eagerly and materializes its page.
+ public S3Iterator(S3FileStore fileStore, String key, boolean incremental) {
+ ListObjectsRequest listObjectsRequest = buildRequest(fileStore.name(), key, incremental);
+
+ this.fileStore = fileStore;
+ this.fileSystem = fileStore.getFileSystem();
+ this.key = key;
+ this.current = fileSystem.getClient().listObjects(listObjectsRequest);
+ this.incremental = incremental;
+ loadObjects();
+ }
+
+ // NOTE(review): returns true when the listing is truncated even if the
+ // continuation page turns out to be empty; next() compensates by throwing
+ // NoSuchElementException after loading — confirm callers tolerate this.
+ @Override
+ public boolean hasNext() {
+ return cursor != size || current.isTruncated();
+ }
+
+ @Override
+ public S3Path next() {
+ // page exhausted but more results available: fetch the next batch
+ if (cursor == size && current.isTruncated()) {
+ this.current = fileSystem.getClient().listNextBatchOfObjects(current);
+ loadObjects();
+ }
+ if (cursor == size)
+ throw new NoSuchElementException();
+ return items.get(cursor++);
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+
+ // Materialize the current listing page into 'items' and reset the cursor.
+ private void loadObjects() {
+ this.items.clear();
+ if (incremental)
+ parseObjects();
+ else
+ parseObjectListing(key, items, current);
+ this.size = items.size();
+ this.cursor = 0;
+ }
+
+ // Incremental mode: every object in the page becomes a path, plus any
+ // implicit parent directories not yet emitted.
+ private void parseObjects() {
+ for (final S3ObjectSummary objectSummary : current.getObjectSummaries()) {
+ final String objectSummaryKey = objectSummary.getKey();
+ String[] keyParts = fileSystem.key2Parts(objectSummaryKey);
+ addParentPaths(keyParts);
+ S3Path path = new S3Path(fileSystem, "/" + fileStore.name(), keyParts);
+ if (!items.contains(path)) {
+ items.add(path);
+ }
+ }
+ }
+
+ // Emit the implicit ("virtual") parent directories of the given key parts,
+ // deepest first collected then reversed so parents precede children.
+ private void addParentPaths(String[] keyParts) {
+ if (keyParts.length <= 1)
+ return;
+ String[] subParts = Arrays.copyOf(keyParts, keyParts.length - 1);
+ List parentPaths = new ArrayList<>();
+ while (subParts.length > 0) {
+ S3Path path = new S3Path(fileSystem, "/" + fileStore.name(), subParts);
+ String prefix = current.getPrefix();
+
+ // stop once the parent lies above the listing prefix itself
+ String parentKey = path.getKey();
+ if (prefix.length() > parentKey.length() && prefix.contains(parentKey))
+ break;
+ // skip parents already emitted, either as real items or virtual dirs
+ if (items.contains(path) || addedVirtualDirectories.contains(path)) {
+ subParts = Arrays.copyOf(subParts, subParts.length - 1);
+ continue;
+ }
+ parentPaths.add(path);
+ addedVirtualDirectories.add(path);
+ subParts = Arrays.copyOf(subParts, subParts.length - 1);
+ }
+ Collections.reverse(parentPaths);
+ items.addAll(parentPaths);
+ }
+
+
+ /**
+ * Add to listPath the elements at the same level as s3Path: common
+ * prefixes become directory paths, object summaries become file paths
+ * restricted to immediate descendants of the key.
+ *
+ * @param key the prefix being listed
+ * @param listPath List not null, list to add to
+ * @param current ObjectListing to walk
+ */
+ private void parseObjectListing(String key, List listPath, ObjectListing current) {
+ for (String commonPrefix : current.getCommonPrefixes()) {
+ if (!commonPrefix.equals("/")) {
+ listPath.add(new S3Path(fileSystem, "/" + fileStore.name(), fileSystem.key2Parts(commonPrefix)));
+ }
+ }
+ // TODO: figure out a way to efficiently preprocess commonPrefix basicFileAttributes
+ for (final S3ObjectSummary objectSummary : current.getObjectSummaries()) {
+ final String objectSummaryKey = objectSummary.getKey();
+ // we only want the first level below 'key'
+ String immediateDescendantKey = getImmediateDescendant(key, objectSummaryKey);
+ if (immediateDescendantKey != null) {
+ S3Path descendentPart = new S3Path(fileSystem, "/" + fileStore.name(), fileSystem.key2Parts(immediateDescendantKey));
+ // attach the attributes we already have, sparing a later lookup
+ descendentPart.setFileAttributes(s3Utils.toS3FileAttributes(objectSummary, descendentPart.getKey()));
+ if (!listPath.contains(descendentPart)) {
+ listPath.add(descendentPart);
+ }
+ }
+ }
+ }
+
+ /**
+ * The current #buildRequest() returns all subdirectories and their
+ * content. This method filters keyChild, checking whether it is an
+ * immediate descendant of keyParent.
+ *
+ * @param keyParent String
+ * @param keyChild String
+ * @return the parsed immediate-descendant key, or null when keyChild and
+ * keyParent are the same and nothing should be returned
+ */
+ private String getImmediateDescendant(String keyParent, String keyChild) {
+ keyParent = deleteExtraPath(keyParent);
+ keyChild = deleteExtraPath(keyChild);
+ final int parentLen = keyParent.length();
+ // strip the parent prefix, then keep only the first remaining segment
+ final String childWithoutParent = deleteExtraPath(keyChild.substring(parentLen));
+ String[] parts = childWithoutParent.split("/");
+ if (parts.length > 0 && !parts[0].isEmpty())
+ return keyParent + "/" + parts[0];
+ return null;
+
+ }
+
+ // Normalize a key: strip one leading and one trailing '/'.
+ private String deleteExtraPath(String keyChild) {
+ if (keyChild.startsWith("/"))
+ keyChild = keyChild.substring(1);
+ if (keyChild.endsWith("/"))
+ keyChild = keyChild.substring(0, keyChild.length() - 1);
+ return keyChild;
+ }
+
+
+ ListObjectsRequest buildRequest(String bucketName, String key, boolean incremental) {
+ return buildRequest(bucketName, key, incremental, null);
+ }
+
+ // Incremental mode lists the whole subtree; otherwise '/' delimiting
+ // restricts results to one level below the prefix.
+ ListObjectsRequest buildRequest(String bucketName, String key, boolean incremental, Integer maxKeys) {
+ if (incremental)
+ return new ListObjectsRequest(bucketName, key, null, null, maxKeys);
+ return new ListObjectsRequest(bucketName, key, key, "/", maxKeys);
+ }
+}
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3Path.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3Path.java
new file mode 100644
index 00000000000..37d1261e670
--- /dev/null
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3Path.java
@@ -0,0 +1,614 @@
+package com.scalableminds.webknossos.datastore.s3fs;
+
+import com.google.common.base.*;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import com.scalableminds.webknossos.datastore.s3fs.attribute.S3BasicFileAttributes;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.net.URI;
+import java.net.URL;
+import java.net.URLDecoder;
+import java.nio.file.*;
+import java.util.Iterator;
+import java.util.List;
+
+import static com.google.common.collect.Iterables.*;
+import static java.lang.String.format;
+
+public class S3Path implements Path {
+
+ public static final String PATH_SEPARATOR = "/";
+ /**
+ * S3FileStore which represents the Bucket this path resides in.
+ */
+ private final S3FileStore fileStore;
+
+ /**
+ * URI not encoded
+ * Is the key for AmazonS3
+ */
+ private String uri;
+
+ /**
+ * actual filesystem
+ */
+ private S3FileSystem fileSystem;
+
+ /**
+ * S3BasicFileAttributes cache
+ */
+ private S3BasicFileAttributes fileAttributes;
+
+ /**
+ * Build an S3Path from path segments. '/' are stripped from each segment.
+ *
+ * @param fileSystem S3FileSystem
+ * @param first should be start with a '/' and is the bucket name
+ * @param more directories and files
+ */
+ public S3Path(S3FileSystem fileSystem, String first, String... more) {
+
+ Preconditions.checkArgument(first != null, "first path must be not null");
+ Preconditions.checkArgument(!first.startsWith("//"), "first path doesnt start with '//'. Miss bucket");
+ // see tests com.upplication.s3fs.Path.EndsWithTest#endsWithRelativeBlankAbsolute()
+ // Preconditions.checkArgument(!first.isEmpty(), "first path must be not empty");
+
+ boolean hasBucket = first.startsWith("/");
+
+
+ List pathsURI = Lists
+ .newArrayList(Splitter.on(PATH_SEPARATOR)
+ .omitEmptyStrings()
+ .split(first));
+
+ if (hasBucket) { // absolute path
+
+ Preconditions.checkArgument(pathsURI.size() >= 1, "path must start with bucket name");
+ Preconditions.checkArgument(!pathsURI.get(0).isEmpty(), "bucket name must be not empty");
+ String bucket = pathsURI.get(0);
+ this.fileStore = new S3FileStore(fileSystem, bucket);
+ // the filestore is not part of the uri
+ pathsURI.remove(0);
+ }
+ else {
+ // relative uri
+ this.fileStore = null;
+ }
+
+ StringBuilder uriBuilder = new StringBuilder();
+ if (hasBucket) {
+ uriBuilder.append(PATH_SEPARATOR);
+ }
+ for (String path : pathsURI) {
+ uriBuilder.append(path + PATH_SEPARATOR);
+ }
+ if (more != null) {
+ for (String path : more) {
+ uriBuilder.append(path + PATH_SEPARATOR);
+ }
+ }
+ this.uri = normalizeURI(uriBuilder.toString());
+ // remove last PATH_SEPARATOR
+ if (!first.isEmpty() &&
+ // only first param and not ended with PATH_SEPARATOR
+ ((!first.endsWith(PATH_SEPARATOR) && (more == null || more.length == 0))
+ // we have more param and not ended with PATH_SEPARATOR
+ || more != null && more.length > 0 && !more[more.length-1].endsWith(PATH_SEPARATOR))) {
+ this.uri = this.uri.substring(0, this.uri.length() - 1);
+ }
+
+ this.fileSystem = fileSystem;
+ }
+
+ /**
+ * Remove duplicated slash
+ */
+ private String normalizeURI(String uri) {
+ return uri.replace("//", "/");
+ }
+
+ public S3FileStore getFileStore() {
+ return fileStore;
+ }
+
+ /**
+ * key for amazon without final slash.
+ * note: the final slash need to be added to save a directory (Amazon s3 spec)
+ *
+ * @return the key for AmazonS3Client
+ */
+ public String getKey() {
+
+ String key = this.uri;
+
+ if (key.startsWith("/")) {
+ key = key.substring(1, key.length());
+ }
+
+ return key;
+ }
+
+ @Override
+ public S3FileSystem getFileSystem() {
+ return this.fileSystem;
+ }
+
+ @Override
+ public boolean isAbsolute() {
+ return fileStore != null;
+ }
+
+ @Override
+ public Path getRoot() {
+ if (isAbsolute()) {
+ return new S3Path(fileSystem, PATH_SEPARATOR + fileStore.name() + PATH_SEPARATOR);
+ }
+
+ return null;
+ }
+
+ @Override
+ public Path getFileName() {
+ List paths = uriToList();
+ if (paths.isEmpty()) {
+ // get FileName of root directory is null
+ return null;
+ }
+ String filename = paths.get(paths.size()-1);
+ return new S3Path(fileSystem, filename);
+ }
+
+ @Override
+ public Path getParent() {
+ // bucket is not present in the parts
+ if (uri.isEmpty()) {
+ return null;
+ }
+
+ String newUri = this.uri;
+
+ if (this.uri.endsWith("/")) {
+ newUri = this.uri.substring(0, this.uri.length()-1);
+ }
+ int lastPathSeparatorPosition = newUri.lastIndexOf(PATH_SEPARATOR);
+
+ if (lastPathSeparatorPosition == -1) {
+ return null;
+ }
+
+ newUri = uri.substring(0, lastPathSeparatorPosition + 1);
+
+ if (newUri.isEmpty())
+ return null;
+
+ String filestore = isAbsolute() ? PATH_SEPARATOR + fileStore.name() + PATH_SEPARATOR : "";
+
+ return new S3Path(fileSystem, filestore + newUri);
+ }
+
+ @Override
+ public int getNameCount() {
+ return uriToList().size();
+ }
+
+ @Override
+ public Path getName(int index) {
+
+ List paths = uriToList();
+
+ if (index < 0 || index >= paths.size()) {
+ throw new IllegalArgumentException("index out of range");
+ }
+
+ String path = paths.get(index);
+ StringBuilder pathsBuilder = new StringBuilder();
+ if (isAbsolute() && index == 0) {
+ pathsBuilder.append(PATH_SEPARATOR + fileStore.name() + PATH_SEPARATOR);
+ }
+ pathsBuilder.append(path);
+
+ if (index < paths.size() - 1) {
+ pathsBuilder.append(PATH_SEPARATOR);
+ }
+
+ // if is the last path, check if end with path separator
+ if (index == paths.size() - 1 && this.uri.endsWith(PATH_SEPARATOR)) {
+ pathsBuilder.append(PATH_SEPARATOR);
+ }
+
+ return new S3Path(fileSystem, pathsBuilder.toString());
+ }
+
+ private List uriToList() {
+ return Splitter.on(PATH_SEPARATOR).omitEmptyStrings().splitToList(this.uri);
+ }
+
+ /**
+ * The bucket name not count
+ */
+ @Override
+ public Path subpath(int beginIndex, int endIndex) {
+
+ List paths = uriToList();
+
+ if (beginIndex < 0 || endIndex > paths.size()) {
+ throw new IllegalArgumentException("index out of range");
+ }
+
+ List pathSubList = paths.subList(beginIndex, endIndex);
+ StringBuilder pathsStringBuilder = new StringBuilder();
+
+ // build path string
+
+ if (this.isAbsolute() && beginIndex == 0) {
+ pathsStringBuilder.append(PATH_SEPARATOR + fileStore.name() + PATH_SEPARATOR);
+ }
+ for (String path : pathSubList) {
+ pathsStringBuilder.append(path).append(PATH_SEPARATOR);
+ }
+ String pathsResult = pathsStringBuilder.toString();
+ // if the uri doesnt have last PATH_SEPARATOR we must remove it.
+ if (endIndex == paths.size() && !this.uri.endsWith(PATH_SEPARATOR)) {
+ pathsResult = pathsResult.substring(0, pathsResult.length() - 1);
+ }
+
+ return new S3Path(fileSystem, pathsResult);
+ }
+
+ @Override
+ public boolean startsWith(Path other) {
+
+ if (other.getNameCount() > this.getNameCount()) {
+ return false;
+ }
+
+ if (!(other instanceof S3Path)) {
+ return false;
+ }
+
+ if (this.isAbsolute() && !other.isAbsolute()) {
+ return false;
+ }
+
+ S3Path path = (S3Path) other;
+
+ if (this.isAbsolute() && other.isAbsolute() &&
+ !this.fileStore.name().equals(path.fileStore.name())) {
+ return false;
+ }
+
+ if (path.uri.isEmpty() && !this.uri.isEmpty()) {
+ return false;
+ }
+
+ List pathsOther = path.uriToList();
+ List paths = this.uriToList();
+
+
+ for (int i = 0; i < pathsOther.size(); i++) {
+ if (!pathsOther.get(i).equals(paths.get(i))) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ @Override
+ public boolean startsWith(String path) {
+ S3Path other = new S3Path(this.fileSystem, path);
+ return this.startsWith(other);
+ }
+
+ @Override
+ public boolean endsWith(Path other) {
+ if (other.getNameCount() > this.getNameCount()) {
+ return false;
+ }
+ // empty
+ if (other.getNameCount() == 0 && this.getNameCount() != 0) {
+ return false;
+ }
+
+ if (!(other instanceof S3Path)) {
+ return false;
+ }
+
+ S3Path path = (S3Path) other;
+
+ if ((path.getFileStore() != null && !path.getFileStore().equals(this.getFileStore())) || (path.getFileStore() != null && this.getFileStore() == null)) {
+ return false;
+ }
+
+ // check subkeys
+
+ List pathsOther = path.uriToList();
+ List paths = this.uriToList();
+
+
+ int i = pathsOther.size() - 1;
+ int j = paths.size() - 1;
+ for (; i >= 0 && j >= 0; ) {
+
+ if (!pathsOther.get(i).equals(paths.get(j))) {
+ return false;
+ }
+ i--;
+ j--;
+ }
+ return true;
+ }
+
+ @Override
+ public boolean endsWith(String other) {
+ return this.endsWith(new S3Path(this.fileSystem, other));
+ }
+
+ @Override
+ public Path normalize() {
+ return this;
+ }
+
+ @Override
+ public Path resolve(Path other) {
+ if (other.isAbsolute()) {
+ Preconditions.checkArgument(other instanceof S3Path, "other must be an instance of %s", S3Path.class.getName());
+ return other;
+ }
+
+ S3Path otherS3Path = (S3Path) other;
+ StringBuilder pathBuilder = new StringBuilder();
+
+ if (this.isAbsolute()) {
+ pathBuilder.append(PATH_SEPARATOR + this.fileStore.name() + PATH_SEPARATOR);
+ }
+ pathBuilder.append(this.uri);
+ if (!otherS3Path.uri.isEmpty())
+ pathBuilder.append(PATH_SEPARATOR + otherS3Path.uri);
+
+ return new S3Path(this.fileSystem, pathBuilder.toString());
+ }
+
+ @Override
+ public Path resolve(String other) {
+ return resolve(new S3Path(this.getFileSystem(), other));
+ }
+
+ @Override
+ public Path resolveSibling(Path other) {
+ Preconditions.checkArgument(other instanceof S3Path, "other must be an instance of %s", S3Path.class.getName());
+
+ S3Path s3Path = (S3Path) other;
+
+ Path parent = getParent();
+
+ if (parent == null || s3Path.isAbsolute()) {
+ return s3Path;
+ }
+
+ List othersPaths = s3Path.uriToList();
+
+ if (othersPaths.isEmpty()) { // other is relative and empty
+ return parent;
+ }
+
+ List paths = this.uriToList();
+
+ StringBuilder pathBuilder = new StringBuilder();
+ String lastPath = othersPaths.get(othersPaths.size() - 1);
+ if (isAbsolute()) {
+ pathBuilder.append(PATH_SEPARATOR + fileStore.name() + PATH_SEPARATOR);
+ }
+ for (String path : concat(paths.subList(0, paths.size() - 1), othersPaths)) {
+ pathBuilder.append(path);
+ if (!lastPath.equals(path) || s3Path.uri.endsWith(PATH_SEPARATOR)) {
+ pathBuilder.append(PATH_SEPARATOR);
+ }
+ }
+
+ return new S3Path(fileSystem, pathBuilder.toString());
+ }
+
+ @Override
+ public Path resolveSibling(String other) {
+ return resolveSibling(new S3Path(this.getFileSystem(), other));
+ }
+
+ @Override
+ public Path relativize(Path other) {
+ Preconditions.checkArgument(other instanceof S3Path, "other must be an instance of %s", S3Path.class.getName());
+ S3Path s3Path = (S3Path) other;
+
+ if (this.equals(other)) {
+ return new S3Path(this.getFileSystem(), "");
+ }
+
+ Preconditions.checkArgument(isAbsolute(), "Path is already relative: %s", this);
+ Preconditions.checkArgument(s3Path.isAbsolute(), "Cannot relativize against a relative path: %s", s3Path);
+ Preconditions.checkArgument(fileStore.equals(s3Path.getFileStore()), "Cannot relativize paths with different buckets: '%s', '%s'", this, other);
+ // Preconditions.checkArgument(parts.size() <= s3Path.parts.size(), "Cannot relativize against a parent path: '%s', '%s'", this, other);
+
+ String uriPath = decode(URI.create(encode(this.uri)).relativize(URI.create(encode(s3Path.uri))));
+ return new S3Path(fileSystem, uriPath);
+ }
+
+ /**
+ * Examples:
+ *
+ * Relative:
+ * --------
+ * NO use fileSystem and not used fileStore.
+ * - path/file
+ *
+ * Absolute:
+ * --------
+ * Use the fileSystem to get the host and the filestore to get the first path (in the future the filestore can be attached to the host)
+ * http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
+ * - s3://AMAZONACCESSKEY@s3.amazonaws.com/bucket/path/file
+ * - s3://AMAZONACCESSKEY@bucket.s3.amazonaws.com/path/file
+ * - s3://s3-aws-region.amazonaws.com/bucket/path/file
+ *
+ * @return URI never null
+ */
+ @Override
+ public URI toUri() {
+
+ String uri = encode(this.uri);
+ // absolute
+ if (this.isAbsolute()) {
+ StringBuilder builder = new StringBuilder();
+ builder.append(fileSystem.getKey());
+ builder.append(PATH_SEPARATOR + fileStore.name() + PATH_SEPARATOR);
+ builder.append(uri);
+ return URI.create("s3://" + normalizeURI(builder.toString()));
+ }
+ else {
+ return URI.create(uri);
+ }
+ }
+
+ /**
+ * Get the url for the s3Path.
+ *
+ * The url represents a Uniform Resource
+ * Locator, a pointer to a "resource" on the World
+ * Wide Web.
+ *
+ * All S3Path has a URL if is absolute
+ *
+ * @see com.amazonaws.services.s3.AmazonS3#getUrl(String, String)
+ * @see S3Path#toUri() for unique resource identifier
+ * @return URL or null if is not absoulte
+ */
+ public URL toURL() {
+ if (!this.isAbsolute())
+ return null;
+
+ return this.getFileSystem().getClient().getUrl(this.fileStore.name(), this.getKey());
+ }
+
+ @Override
+ public Path toAbsolutePath() {
+ if (isAbsolute()) {
+ return this;
+ }
+
+ throw new IllegalStateException(format("Relative path cannot be made absolute: %s", this));
+ }
+
+ @Override
+ public Path toRealPath(LinkOption... options) throws IOException {
+ return toAbsolutePath();
+ }
+
+ @Override
+ public File toFile() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public WatchKey register(WatchService watcher, WatchEvent.Kind>[] events, WatchEvent.Modifier... modifiers) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public WatchKey register(WatchService watcher, WatchEvent.Kind>... events) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Iterator iterator() {
+
+ ImmutableList.Builder builder = ImmutableList.builder();
+
+ if (isAbsolute()) {
+ builder.add(new S3Path(fileSystem, PATH_SEPARATOR + fileStore.name() + PATH_SEPARATOR));
+ }
+
+ List paths = uriToList();
+
+ if (uriToList().isEmpty())
+ return builder.build().iterator();
+
+ String lastPath = paths.get(paths.size() - 1);
+
+ for (String path : uriToList()) {
+ String pathFinal = path + PATH_SEPARATOR;
+ if (path.equals(lastPath) && !lastPath.endsWith(PATH_SEPARATOR)) {
+ pathFinal = pathFinal.substring(0, pathFinal.length() - 1);
+ }
+ builder.add(new S3Path(fileSystem, pathFinal));
+ }
+
+ return builder.build().iterator();
+ }
+
+ @Override
+ public int compareTo(Path other) {
+ return toString().compareTo(other.toString());
+ }
+
+ @Override
+ public String toString() {
+ return toUri().toString();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o)
+ return true;
+ if (o == null || getClass() != o.getClass())
+ return false;
+
+ S3Path path = (S3Path) o;
+ if (fileStore != null ? !fileStore.equals(path.fileStore) : path.fileStore != null)
+ return false;
+ if (!uri.equals(path.uri))
+ return false;
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = fileStore != null ? fileStore.name().hashCode() : 0;
+ result = 31 * result + uri.hashCode();
+ return result;
+ }
+
+ /**
+ * Encode special URI characters for path.
+ * @param uri String the uri path
+ * @return String
+ */
+ private String encode(String uri) {
+ // remove special case URI starting with //
+ uri = uri.replace("//", "/");
+ uri = uri.replaceAll(" ", "%20");
+ return uri;
+ }
+
+ /**
+ * Decode uri special characters
+ *
+ * @param uri URI mandatory
+ * @return String decoded
+ */
+ private String decode(URI uri) {
+ try {
+ return URLDecoder.decode(uri.toString(), "UTF-8");
+ }
+ catch (UnsupportedEncodingException e) {
+ throw new IllegalStateException("Error decoding key: " + this.uri, e);
+ }
+ }
+
+ public S3BasicFileAttributes getFileAttributes() {
+ return fileAttributes;
+ }
+
+ public void setFileAttributes(S3BasicFileAttributes fileAttributes) {
+ this.fileAttributes = fileAttributes;
+ }
+}
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3ReadOnlySeekableByteChannel.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3ReadOnlySeekableByteChannel.java
new file mode 100644
index 00000000000..d9f0bae4788
--- /dev/null
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3ReadOnlySeekableByteChannel.java
@@ -0,0 +1,158 @@
+package com.scalableminds.webknossos.datastore.s3fs;
+
+import com.amazonaws.services.s3.model.GetObjectRequest;
+import com.amazonaws.services.s3.model.S3Object;
+import java.io.BufferedInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+import java.nio.channels.Channels;
+import java.nio.channels.NonWritableChannelException;
+import java.nio.channels.ReadableByteChannel;
+import java.nio.channels.SeekableByteChannel;
+import java.nio.file.OpenOption;
+import java.nio.file.ReadOnlyFileSystemException;
+import java.nio.file.StandardOpenOption;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+
+import static java.lang.String.format;
+
+public class S3ReadOnlySeekableByteChannel implements SeekableByteChannel {
+
+ private static final int DEFAULT_BUFFER_SIZE = 64000;
+
+ private S3Path path;
+ private Set extends OpenOption> options;
+ private long length;
+ private ExtBufferedInputStream bufferedStream;
+ private ReadableByteChannel rbc;
+ private long position = 0;
+
+ /**
+ * Open or creates a file, returning a seekable byte channel
+ *
+ * @param path the path open or create
+ * @param options options specifying how the file is opened
+ * @throws IOException if an I/O error occurs
+ */
+ public S3ReadOnlySeekableByteChannel(S3Path path, Set extends OpenOption> options) throws IOException {
+ this.path = path;
+ this.options = Collections.unmodifiableSet(new HashSet<>(options));
+
+ String key = path.getKey();
+ //boolean exists = path.getFileSystem().provider().exists(path);
+
+ //if (!exists) {
+ // throw new NoSuchFileException(format("target not exists: %s", path));
+ //} else if (
+ if (
+ this.options.contains(StandardOpenOption.WRITE) ||
+ this.options.contains(StandardOpenOption.CREATE) ||
+ this.options.contains(StandardOpenOption.CREATE_NEW) ||
+ this.options.contains(StandardOpenOption.APPEND)
+ ) {
+ throw new ReadOnlyFileSystemException();
+ }
+
+ this.length = path
+ .getFileSystem()
+ .getClient()
+ .getObjectMetadata(
+ path
+ .getFileStore()
+ .name(),
+ key
+ )
+ .getContentLength();
+ openStreamAt(0);
+ }
+
+ private void openStreamAt(long position) throws IOException {
+ if (rbc != null) {
+ rbc.close();
+ }
+
+ GetObjectRequest rangeObjectRequest =
+ new GetObjectRequest(
+ path.getFileStore().name(),
+ path.getKey()
+ )
+ .withRange(position);
+
+ S3Object s3Object =
+ path
+ .getFileSystem()
+ .getClient()
+ .getObject(rangeObjectRequest);
+
+ bufferedStream =
+ new ExtBufferedInputStream(
+ s3Object.getObjectContent(),
+ DEFAULT_BUFFER_SIZE
+ );
+
+ rbc = Channels.newChannel(bufferedStream);
+ this.position = position;
+ }
+
+ public boolean isOpen() {
+ return rbc.isOpen();
+ }
+
+ public long position() { return position; }
+
+ public SeekableByteChannel position(long targetPosition)
+ throws IOException
+ {
+ long offset = targetPosition - position();
+ if (offset > 0 && offset < bufferedStream.getBytesInBufferAvailable()) {
+ long skipped = bufferedStream.skip(offset);
+ if (skipped != offset) {
+ // shouldn't happen since we are within the buffer
+ throw new IOException("Could not seek to " + targetPosition);
+ }
+ position += offset;
+ } else if (offset != 0) {
+ openStreamAt(targetPosition);
+ }
+ return this;
+ }
+
+ public int read(ByteBuffer dst) throws IOException {
+ int n = rbc.read(dst);
+ if (n > 0) {
+ position += n;
+ }
+ return n;
+ }
+
+ public SeekableByteChannel truncate(long size) {
+ throw new NonWritableChannelException();
+ }
+
+ public int write (ByteBuffer src) {
+ throw new NonWritableChannelException();
+ }
+
+ public long size() {
+ return length;
+ }
+
+ public void close() throws IOException {
+ rbc.close();
+ }
+
+ private class ExtBufferedInputStream extends BufferedInputStream {
+ private ExtBufferedInputStream(final InputStream inputStream, final int size) {
+ super(inputStream, size);
+ }
+
+ /** Returns the number of bytes that can be read from the buffer without reading more into the buffer. */
+ int getBytesInBufferAvailable() {
+ if (this.count == this.pos) return 0;
+ else return this.buf.length - this.pos;
+ }
+ }
+}
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3SeekableByteChannel.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3SeekableByteChannel.java
new file mode 100644
index 00000000000..4d4ff0afbb9
--- /dev/null
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3SeekableByteChannel.java
@@ -0,0 +1,145 @@
+package com.scalableminds.webknossos.datastore.s3fs;
+
+import static java.lang.String.format;
+
+import java.io.BufferedInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+import java.nio.channels.SeekableByteChannel;
+import java.nio.file.*;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.tika.Tika;
+
+import com.amazonaws.services.s3.model.ObjectMetadata;
+import com.amazonaws.services.s3.model.S3Object;
+
+public class S3SeekableByteChannel implements SeekableByteChannel {
+
+ private S3Path path;
+ private Set extends OpenOption> options;
+ private SeekableByteChannel seekable;
+ private Path tempFile;
+
+ /**
+ * Open or creates a file, returning a seekable byte channel
+ *
+ * @param path the path open or create
+ * @param options options specifying how the file is opened
+ * @throws IOException if an I/O error occurs
+ */
+ public S3SeekableByteChannel(S3Path path, Set extends OpenOption> options) throws IOException {
+ this.path = path;
+ this.options = Collections.unmodifiableSet(new HashSet<>(options));
+ String key = path.getKey();
+ boolean exists = path.getFileSystem().provider().exists(path);
+
+ if (exists && this.options.contains(StandardOpenOption.CREATE_NEW))
+ throw new FileAlreadyExistsException(format("target already exists: %s", path));
+ else if (!exists && !this.options.contains(StandardOpenOption.CREATE_NEW) &&
+ !this.options.contains(StandardOpenOption.CREATE))
+ throw new NoSuchFileException(format("target not exists: %s", path));
+
+ tempFile = Files.createTempFile("temp-s3-", key.replaceAll("/", "_"));
+ boolean removeTempFile = true;
+ try {
+ if (exists) {
+ try (S3Object object = path.getFileSystem()
+ .getClient()
+ .getObject(path.getFileStore().getBucket().getName(), key)) {
+ Files.copy(object.getObjectContent(), tempFile, StandardCopyOption.REPLACE_EXISTING);
+ }
+ }
+
+ Set extends OpenOption> seekOptions = new HashSet<>(this.options);
+ seekOptions.remove(StandardOpenOption.CREATE_NEW);
+ seekable = Files.newByteChannel(tempFile, seekOptions);
+ removeTempFile = false;
+ } finally {
+ if (removeTempFile) {
+ Files.deleteIfExists(tempFile);
+ }
+ }
+ }
+
+ @Override
+ public boolean isOpen() {
+ return seekable.isOpen();
+ }
+
+ @Override
+ public void close() throws IOException {
+ try {
+ if (!seekable.isOpen())
+ return;
+
+ seekable.close();
+
+ if (options.contains(StandardOpenOption.DELETE_ON_CLOSE)) {
+ path.getFileSystem().provider().delete(path);
+ return;
+ }
+
+ if (options.contains(StandardOpenOption.READ) && options.size() == 1) {
+ return;
+ }
+
+ sync();
+
+ } finally {
+ Files.deleteIfExists(tempFile);
+ }
+ }
+
+ /**
+ * try to sync the temp file with the remote s3 path.
+ *
+ * @throws IOException if the tempFile fails to open a newInputStream
+ */
+ protected void sync() throws IOException {
+ try (InputStream stream = new BufferedInputStream(Files.newInputStream(tempFile))) {
+ ObjectMetadata metadata = new ObjectMetadata();
+ metadata.setContentLength(Files.size(tempFile));
+ if (path.getFileName() != null) {
+ metadata.setContentType(new Tika().detect(stream, path.getFileName().toString()));
+ }
+
+ String bucket = path.getFileStore().name();
+ String key = path.getKey();
+ path.getFileSystem().getClient().putObject(bucket, key, stream, metadata);
+ }
+ }
+
+ @Override
+ public int write(ByteBuffer src) throws IOException {
+ return seekable.write(src);
+ }
+
+ @Override
+ public SeekableByteChannel truncate(long size) throws IOException {
+ return seekable.truncate(size);
+ }
+
+ @Override
+ public long size() throws IOException {
+ return seekable.size();
+ }
+
+ @Override
+ public int read(ByteBuffer dst) throws IOException {
+ return seekable.read(dst);
+ }
+
+ @Override
+ public SeekableByteChannel position(long newPosition) throws IOException {
+ return seekable.position(newPosition);
+ }
+
+ @Override
+ public long position() throws IOException {
+ return seekable.position();
+ }
+}
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3BasicFileAttributeView.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3BasicFileAttributeView.java
new file mode 100644
index 00000000000..80d0b62240e
--- /dev/null
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3BasicFileAttributeView.java
@@ -0,0 +1,33 @@
+package com.scalableminds.webknossos.datastore.s3fs.attribute;
+
+import com.scalableminds.webknossos.datastore.s3fs.S3Path;
+
+import java.io.IOException;
+import java.nio.file.attribute.BasicFileAttributeView;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.nio.file.attribute.FileTime;
+
+
+public class S3BasicFileAttributeView implements BasicFileAttributeView {
+
+ private S3Path s3Path;
+
+ public S3BasicFileAttributeView(S3Path s3Path) {
+ this.s3Path = s3Path;
+ }
+
+ @Override
+ public String name() {
+ return "basic";
+ }
+
+ @Override
+ public BasicFileAttributes readAttributes() throws IOException {
+ return s3Path.getFileSystem().provider().readAttributes(s3Path, BasicFileAttributes.class);
+ }
+
+ @Override
+ public void setTimes(FileTime lastModifiedTime, FileTime lastAccessTime, FileTime createTime) throws IOException {
+ // not implemented
+ }
+}
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3BasicFileAttributes.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3BasicFileAttributes.java
new file mode 100644
index 00000000000..99318debfb8
--- /dev/null
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3BasicFileAttributes.java
@@ -0,0 +1,85 @@
+package com.scalableminds.webknossos.datastore.s3fs.attribute;
+
+import static java.lang.String.format;
+
+import java.nio.file.attribute.BasicFileAttributes;
+import java.nio.file.attribute.FileTime;
+
/**
 * BasicFileAttributes implementation for an S3 object.
 *
 * Access and creation times both report the last-modified time (no separate
 * values are stored). Each instance records when it was created, so callers
 * can expire cached attribute snapshots.
 */
public class S3BasicFileAttributes implements BasicFileAttributes {

  private final String key;
  private final FileTime lastModifiedTime;
  private final long size;
  private final boolean directory;
  private final boolean regularFile;
  // timestamp (ms) of when this attribute snapshot was taken
  private long cacheCreated;

  public S3BasicFileAttributes(String key, FileTime lastModifiedTime, long size, boolean isDirectory, boolean isRegularFile) {
    this.key = key;
    this.lastModifiedTime = lastModifiedTime;
    this.size = size;
    this.directory = isDirectory;
    this.regularFile = isRegularFile;
    this.cacheCreated = System.currentTimeMillis();
  }

  @Override
  public FileTime lastModifiedTime() {
    return lastModifiedTime;
  }

  /** Reports the last-modified time; no separate access time is stored. */
  @Override
  public FileTime lastAccessTime() {
    return lastModifiedTime;
  }

  /** Reports the last-modified time; no separate creation time is stored. */
  @Override
  public FileTime creationTime() {
    return lastModifiedTime;
  }

  @Override
  public boolean isRegularFile() {
    return regularFile;
  }

  @Override
  public boolean isDirectory() {
    return directory;
  }

  @Override
  public boolean isSymbolicLink() {
    return false;
  }

  @Override
  public boolean isOther() {
    return false;
  }

  @Override
  public long size() {
    return size;
  }

  /** The S3 key serves as the unique file key. */
  @Override
  public Object fileKey() {
    return key;
  }

  @Override
  public String toString() {
    return String.format(
        "[%s: lastModified=%s, size=%s, isDirectory=%s, isRegularFile=%s]",
        key, lastModifiedTime, size, directory, regularFile);
  }

  public long getCacheCreated() {
    return cacheCreated;
  }

  // for testing
  public void setCacheCreated(long time) {
    this.cacheCreated = time;
  }
}
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3PosixFileAttributeView.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3PosixFileAttributeView.java
new file mode 100644
index 00000000000..ae25ad465e8
--- /dev/null
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3PosixFileAttributeView.java
@@ -0,0 +1,61 @@
+package com.scalableminds.webknossos.datastore.s3fs.attribute;
+
+import com.scalableminds.webknossos.datastore.s3fs.S3Path;
+
+import java.io.IOException;
+import java.nio.file.attribute.*;
+import java.util.Set;
+
+
+public class S3PosixFileAttributeView implements PosixFileAttributeView {
+
+ private S3Path s3Path;
+ private PosixFileAttributes posixFileAttributes;
+
+ public S3PosixFileAttributeView(S3Path s3Path) {
+ this.s3Path = s3Path;
+ }
+
+ @Override
+ public String name() {
+ return "posix";
+ }
+
+ @Override
+ public PosixFileAttributes readAttributes() throws IOException {
+ return read();
+ }
+
+ @Override
+ public UserPrincipal getOwner() throws IOException {
+ return read().owner();
+ }
+
+ @Override
+ public void setOwner(UserPrincipal owner) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void setPermissions(Set perms) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void setGroup(GroupPrincipal group) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void setTimes(FileTime lastModifiedTime, FileTime lastAccessTime, FileTime createTime) throws IOException {
+ // not implemented
+ }
+
+ public PosixFileAttributes read() throws IOException {
+ if (posixFileAttributes == null) {
+ posixFileAttributes = s3Path.getFileSystem()
+ .provider().readAttributes(s3Path, PosixFileAttributes.class);
+ }
+ return posixFileAttributes;
+ }
+}
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3PosixFileAttributes.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3PosixFileAttributes.java
new file mode 100644
index 00000000000..d00d332c0d7
--- /dev/null
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3PosixFileAttributes.java
@@ -0,0 +1,37 @@
+package com.scalableminds.webknossos.datastore.s3fs.attribute;
+
+import java.nio.file.attribute.*;
+import java.util.Set;
+
+import static java.lang.String.format;
+
+public class S3PosixFileAttributes extends S3BasicFileAttributes implements PosixFileAttributes {
+
+ private UserPrincipal userPrincipal;
+ private GroupPrincipal groupPrincipal;
+ private Set posixFilePermissions;
+
+ public S3PosixFileAttributes(String key, FileTime lastModifiedTime, long size, boolean isDirectory, boolean isRegularFile, UserPrincipal userPrincipal, GroupPrincipal groupPrincipal, Set posixFilePermissionSet) {
+
+ super(key, lastModifiedTime, size, isDirectory, isRegularFile);
+
+ this.userPrincipal = userPrincipal;
+ this.groupPrincipal = groupPrincipal;
+ this.posixFilePermissions = posixFilePermissionSet;
+ }
+
+ @Override
+ public UserPrincipal owner() {
+ return this.userPrincipal;
+ }
+
+ @Override
+ public GroupPrincipal group() {
+ return this.groupPrincipal;
+ }
+
+ @Override
+ public Set permissions() {
+ return this.posixFilePermissions;
+ }
+}
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3UserPrincipal.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3UserPrincipal.java
new file mode 100644
index 00000000000..5a0bacc178c
--- /dev/null
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3UserPrincipal.java
@@ -0,0 +1,17 @@
+package com.scalableminds.webknossos.datastore.s3fs.attribute;
+
+import java.nio.file.attribute.UserPrincipal;
+/** {@link UserPrincipal} identified by an S3 canonical user id and display name. */
+public class S3UserPrincipal implements UserPrincipal {
+
+ private final String name;
+
+ public S3UserPrincipal(String name) {
+ this.name = name;
+ }
+
+ @Override
+ public String getName() {
+ return name;
+ }
+}
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/AnonymousAWSCredentialsProvider.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/AnonymousAWSCredentialsProvider.java
new file mode 100644
index 00000000000..513431a8ff6
--- /dev/null
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/AnonymousAWSCredentialsProvider.java
@@ -0,0 +1,18 @@
+package com.scalableminds.webknossos.datastore.s3fs.util;
+
+import com.amazonaws.auth.AWSCredentials;
+import com.amazonaws.auth.AWSCredentialsProvider;
+import com.amazonaws.auth.AnonymousAWSCredentials;
+/** Supplies {@link AnonymousAWSCredentials}, enabling unauthenticated access to publicly readable S3 buckets. */
+public class AnonymousAWSCredentialsProvider implements AWSCredentialsProvider {
+
+ @Override
+ public AWSCredentials getCredentials() {
+ return new AnonymousAWSCredentials();
+ }
+
+ @Override
+ public void refresh() {
+ // no-op: anonymous credentials carry no state and never expire, so there is nothing to refresh
+ }
+}
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/AttributesUtils.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/AttributesUtils.java
new file mode 100644
index 00000000000..12a4983023c
--- /dev/null
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/AttributesUtils.java
@@ -0,0 +1,100 @@
+package com.scalableminds.webknossos.datastore.s3fs.util;
+
+
+import java.nio.file.attribute.BasicFileAttributes;
+import java.nio.file.attribute.PosixFileAttributes;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Utilities to transform {@link BasicFileAttributes} into a {@code Map<String, Object>}
+ */
+public abstract class AttributesUtils {
+
+ /**
+ * Given a non-null BasicFileAttributes, return a Map whose keys are the
+ * attribute names of BasicFileAttributes (and of PosixFileAttributes, when the
+ * instance implements it) and whose values are the corresponding attribute values
+ *
+ * @param attr BasicFileAttributes, must not be null
+ * @return {@code Map<String, Object>} never null
+ */
+ public static Map<String, Object> fileAttributeToMap(BasicFileAttributes attr) {
+ Map<String, Object> result = new HashMap<>();
+ result.put("creationTime", attr.creationTime());
+ result.put("fileKey", attr.fileKey());
+ result.put("isDirectory", attr.isDirectory());
+ result.put("isOther", attr.isOther());
+ result.put("isRegularFile", attr.isRegularFile());
+ result.put("isSymbolicLink", attr.isSymbolicLink());
+ result.put("lastAccessTime", attr.lastAccessTime());
+ result.put("lastModifiedTime", attr.lastModifiedTime());
+ result.put("size", attr.size());
+
+ if (attr instanceof PosixFileAttributes) {
+ PosixFileAttributes posixAttr = (PosixFileAttributes) attr;
+ result.put("permissions", posixAttr.permissions());
+ result.put("owner", posixAttr.owner());
+ result.put("group", posixAttr.group());
+ }
+
+ return result;
+ }
+
+ /**
+ * Transform the java.nio.file.attribute.BasicFileAttributes to a Map, keeping
+ * only the attributes named in the filters param
+ *
+ * @param attr BasicFileAttributes, must not be null
+ * @param filters attribute names to keep; an optional "basic:" or "posix:" view prefix is stripped before matching
+ * @return {@code Map<String, Object>} containing at most the keys named in filters
+ */
+ public static Map<String, Object> fileAttributeToMap(BasicFileAttributes attr, String[] filters) {
+ Map<String, Object> result = new HashMap<>();
+
+ for (String filter : filters) {
+ filter = filter.replace("basic:", ""); // strip the attribute-view prefix, e.g. "basic:size" -> "size"
+ filter = filter.replace("posix:", "");
+ switch (filter) {
+ case "creationTime":
+ result.put("creationTime", attr.creationTime());
+ break;
+ case "fileKey":
+ result.put("fileKey", attr.fileKey());
+ break;
+ case "isDirectory":
+ result.put("isDirectory", attr.isDirectory());
+ break;
+ case "isOther":
+ result.put("isOther", attr.isOther());
+ break;
+ case "isRegularFile":
+ result.put("isRegularFile", attr.isRegularFile());
+ break;
+ case "isSymbolicLink":
+ result.put("isSymbolicLink", attr.isSymbolicLink());
+ break;
+ case "lastAccessTime":
+ result.put("lastAccessTime", attr.lastAccessTime());
+ break;
+ case "lastModifiedTime":
+ result.put("lastModifiedTime", attr.lastModifiedTime());
+ break;
+ case "size":
+ result.put("size", attr.size());
+ break;
+ case "permissions":
+ result.put("permissions", ((PosixFileAttributes)attr).permissions());
+ break;
+ case "group":
+ result.put("group", ((PosixFileAttributes)attr).group());
+ break;
+ case "owner":
+ result.put("owner", ((PosixFileAttributes)attr).owner());
+ break;
+ }
+ }
+
+ return result;
+ }
+}
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/Cache.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/Cache.java
new file mode 100644
index 00000000000..7ca13ab00a0
--- /dev/null
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/Cache.java
@@ -0,0 +1,29 @@
+package com.scalableminds.webknossos.datastore.s3fs.util;
+
+import com.scalableminds.webknossos.datastore.s3fs.attribute.S3BasicFileAttributes;
+
+public class Cache {
+
+ /**
+ * Check whether cached S3BasicFileAttributes are still fresh
+ *
+ * @param cache int time-to-live of the fileAttributes in milliseconds; -1 means the cache never expires
+ * @param fileAttributes S3BasicFileAttributes to check, may be null
+ * @return true if fileAttributes is non-null and still within its time-to-live; if cache is -1 and fileAttributes is not null this always returns true
+ */
+ public boolean isInTime(int cache, S3BasicFileAttributes fileAttributes) {
+ if (fileAttributes == null) {
+ return false;
+ }
+
+ if (cache == -1) {
+ return true;
+ }
+
+ return getCurrentTime() - cache <= fileAttributes.getCacheCreated(); // creation timestamp still within the TTL window
+ }
+
+ // current wall-clock time in milliseconds since the epoch
+ public long getCurrentTime() {
+ return System.currentTimeMillis();
+ }
+}
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/IOUtils.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/IOUtils.java
new file mode 100644
index 00000000000..65d15be9e53
--- /dev/null
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/IOUtils.java
@@ -0,0 +1,32 @@
+package com.scalableminds.webknossos.datastore.s3fs.util;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * Utilities for streams
+ */
+public abstract class IOUtils {
+ /**
+ * Read the stream to exhaustion and return its content as a byte array.
+ * The stream is not closed by this method.
+ * @param is InputStream to read from
+ * @return byte array with the full stream content, never null
+ * @throws IOException if reading from the stream fails
+ */
+ public static byte[] toByteArray(InputStream is) throws IOException {
+ ByteArrayOutputStream buffer = new ByteArrayOutputStream();
+
+ int nRead;
+ byte[] data = new byte[16384]; // 16 KiB read chunk
+
+ while ((nRead = is.read(data, 0, data.length)) != -1) {
+ buffer.write(data, 0, nRead);
+ }
+
+ buffer.flush(); // a no-op for ByteArrayOutputStream, kept for clarity
+
+ return buffer.toByteArray();
+ }
+}
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/S3Utils.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/S3Utils.java
new file mode 100644
index 00000000000..0716563ad8b
--- /dev/null
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/S3Utils.java
@@ -0,0 +1,198 @@
+package com.scalableminds.webknossos.datastore.s3fs.util;
+
+import com.amazonaws.services.s3.AmazonS3;
+import com.amazonaws.services.s3.model.*;
+import com.google.common.collect.Sets;
+import com.scalableminds.webknossos.datastore.s3fs.attribute.S3BasicFileAttributes;
+import com.scalableminds.webknossos.datastore.s3fs.attribute.S3PosixFileAttributes;
+import com.scalableminds.webknossos.datastore.s3fs.attribute.S3UserPrincipal;
+import com.scalableminds.webknossos.datastore.s3fs.S3Path;
+
+import java.nio.file.NoSuchFileException;
+import java.nio.file.attribute.FileTime;
+import java.nio.file.attribute.PosixFilePermission;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Utilities to work with Amazon S3 Objects.
+ */
+public class S3Utils {
+
+ /**
+ * Get the {@link S3ObjectSummary} that represent this Path or her first child if this path not exists
+ *
+ * @param s3Path {@link S3Path}
+ * @return {@link S3ObjectSummary}
+ * @throws NoSuchFileException if not found the path and any child
+ */
+ public S3ObjectSummary getS3ObjectSummary(S3Path s3Path) throws NoSuchFileException {
+ String key = s3Path.getKey();
+ String bucketName = s3Path.getFileStore().name();
+ AmazonS3 client = s3Path.getFileSystem().getClient();
+ // try to find the element with the current key (maybe with end slash or maybe not.)
+ try {
+ ObjectMetadata metadata = client.getObjectMetadata(bucketName, key);
+ S3ObjectSummary result = new S3ObjectSummary();
+ result.setBucketName(bucketName);
+ result.setETag(metadata.getETag());
+ result.setKey(key);
+ result.setLastModified(metadata.getLastModified());
+ result.setSize(metadata.getContentLength());
+ try {
+ AccessControlList objectAcl = client.getObjectAcl(bucketName, key);
+ result.setOwner(objectAcl.getOwner());
+ } catch (AmazonS3Exception e) {
+ // If we don't have permission to view the ACL, that's fine, we can leave `owner` empty
+ if (e.getStatusCode() != 403)
+ throw e;
+ }
+ return result;
+ } catch (AmazonS3Exception e) {
+ if (e.getStatusCode() != 404)
+ throw e;
+ }
+
+ // if not found (404 err) with the original key.
+ // try to find the elment as a directory.
+ try {
+ // is a virtual directory
+ ListObjectsRequest request = new ListObjectsRequest();
+ request.setBucketName(bucketName);
+ String keyFolder = key;
+ if (!keyFolder.endsWith("/")) {
+ keyFolder += "/";
+ }
+ request.setPrefix(keyFolder);
+ request.setMaxKeys(1);
+ ObjectListing current = client.listObjects(request);
+ if (!current.getObjectSummaries().isEmpty())
+ return current.getObjectSummaries().get(0);
+ } catch (Exception e) {
+ //
+ }
+ throw new NoSuchFileException(bucketName + S3Path.PATH_SEPARATOR + key);
+ }
+
+ /**
+ * getS3FileAttributes for the s3Path
+ *
+ * @param s3Path S3Path mandatory not null
+ * @return S3FileAttributes never null
+ */
+ public S3BasicFileAttributes getS3FileAttributes(S3Path s3Path) throws NoSuchFileException {
+ S3ObjectSummary objectSummary = getS3ObjectSummary(s3Path);
+ return toS3FileAttributes(objectSummary, s3Path.getKey());
+ }
+
+ /**
+ * get the S3PosixFileAttributes for a S3Path
+ * @param s3Path Path mandatory not null
+ * @return S3PosixFileAttributes never null
+ * @throws NoSuchFileException if the Path doesnt exists
+ */
+ public S3PosixFileAttributes getS3PosixFileAttributes(S3Path s3Path) throws NoSuchFileException {
+ S3ObjectSummary objectSummary = getS3ObjectSummary(s3Path);
+
+ String key = s3Path.getKey();
+ String bucketName = s3Path.getFileStore().name();
+
+ S3BasicFileAttributes attrs = toS3FileAttributes(objectSummary, key);
+ S3UserPrincipal userPrincipal = null;
+ Set permissions = null;
+
+ if (!attrs.isDirectory()) {
+ AmazonS3 client = s3Path.getFileSystem().getClient();
+ AccessControlList acl = client.getObjectAcl(bucketName, key);
+ Owner owner = acl.getOwner();
+
+ userPrincipal = new S3UserPrincipal(owner.getId() + ":" + owner.getDisplayName());
+ permissions = toPosixFilePermissions(acl.getGrantsAsList());
+ }
+
+ return new S3PosixFileAttributes((String)attrs.fileKey(), attrs.lastModifiedTime(),
+ attrs.size(), attrs.isDirectory(), attrs.isRegularFile(), userPrincipal, null, permissions);
+ }
+
+
+ /**
+ * transform com.amazonaws.services.s3.model.Grant to java.nio.file.attribute.PosixFilePermission
+ * @see #toPosixFilePermission(Permission)
+ * @param grants Set grants mandatory, must be not null
+ * @return Set PosixFilePermission never null
+ */
+ public Set toPosixFilePermissions(List grants) {
+ Set filePermissions = new HashSet<>();
+ for (Grant grant : grants) {
+ filePermissions.addAll(toPosixFilePermission(grant.getPermission()));
+ }
+
+ return filePermissions;
+ }
+
+ /**
+ * transform a com.amazonaws.services.s3.model.Permission to a java.nio.file.attribute.PosixFilePermission
+ * We use the follow rules:
+ * - transform only to the Owner permission, S3 doesnt have concepts like owner, group or other so we map only to owner.
+ * - ACP is a special permission: WriteAcp are mapped to Owner execute permission and ReadAcp are mapped to owner read
+ * @param permission Permission to map, mandatory must be not null
+ * @return Set PosixFilePermission never null
+ */
+ public Set toPosixFilePermission(Permission permission){
+ switch (permission) {
+ case FullControl:
+ return Sets.newHashSet(PosixFilePermission.OWNER_EXECUTE,
+ PosixFilePermission.OWNER_READ,
+ PosixFilePermission.OWNER_WRITE);
+ case Write:
+ return Sets.newHashSet(PosixFilePermission.OWNER_WRITE);
+ case Read:
+ return Sets.newHashSet(PosixFilePermission.OWNER_READ);
+ case ReadAcp:
+ return Sets.newHashSet(PosixFilePermission.OWNER_READ);
+ case WriteAcp:
+ return Sets.newHashSet(PosixFilePermission.OWNER_EXECUTE);
+ }
+ throw new IllegalStateException("Unknown Permission: " + permission);
+ }
+
+ /**
+ * transform S3ObjectSummary to S3FileAttributes
+ *
+ * @param objectSummary S3ObjectSummary mandatory not null, the real objectSummary with
+ * exactly the same key than the key param or the immediate descendant
+ * if it is a virtual directory
+ * @param key String the real key that can be exactly equal than the objectSummary or
+ * @return S3FileAttributes
+ */
+ public S3BasicFileAttributes toS3FileAttributes(S3ObjectSummary objectSummary, String key) {
+ // parse the data to BasicFileAttributes.
+ FileTime lastModifiedTime = null;
+ if (objectSummary.getLastModified() != null) {
+ lastModifiedTime = FileTime.from(objectSummary.getLastModified().getTime(), TimeUnit.MILLISECONDS);
+ }
+ long size = objectSummary.getSize();
+ boolean directory = false;
+ boolean regularFile = false;
+ String resolvedKey = objectSummary.getKey();
+ // check if is a directory and exists the key of this directory at amazon s3
+ if (key.endsWith("/") && resolvedKey.equals(key) ||
+ resolvedKey.equals(key + "/")) {
+ directory = true;
+ } else if (key.isEmpty()) { // is a bucket (no key)
+ directory = true;
+ resolvedKey = "/";
+ } else if (!resolvedKey.equals(key) && resolvedKey.startsWith(key)) { // is a directory but not exists at amazon s3
+ directory = true;
+ // no metadata, we fake one
+ size = 0;
+ // delete extra part
+ resolvedKey = key + "/";
+ } else {
+ regularFile = true;
+ }
+ return new S3BasicFileAttributes(resolvedKey, lastModifiedTime, size, directory, regularFile);
+ }
+}
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/FileSystemsHolder.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/FileSystemsHolder.scala
index 0091e7cecec..ff41d7cf9b5 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/FileSystemsHolder.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/FileSystemsHolder.scala
@@ -5,10 +5,10 @@ import java.net.URI
import java.nio.file.spi.FileSystemProvider
import java.nio.file.{FileSystem, FileSystemAlreadyExistsException, FileSystems}
import java.util.ServiceLoader
-
import com.google.common.collect.ImmutableMap
import com.scalableminds.util.cache.LRUConcurrentCache
import com.scalableminds.webknossos.datastore.dataformats.zarr.RemoteSourceDescriptor
+import com.scalableminds.webknossos.datastore.s3fs.AmazonS3Factory
import com.typesafe.scalalogging.LazyLogging
import scala.collection.JavaConverters._
@@ -77,10 +77,10 @@ object FileSystemsHolder extends LazyLogging {
if (scheme == schemeS3) {
ImmutableMap
.builder[String, Any]
- .put(com.upplication.s3fs.AmazonS3Factory.ACCESS_KEY, user)
- .put(com.upplication.s3fs.AmazonS3Factory.SECRET_KEY, password)
+ .put(AmazonS3Factory.ACCESS_KEY, user)
+ .put(AmazonS3Factory.SECRET_KEY, password)
.build
- } else if (scheme == schemeHttps) {
+ } else if (scheme == schemeHttps || scheme == schemeHttp) {
ImmutableMap.builder[String, Any].put("user", user).put("password", password).build
} else emptyEnv
}).getOrElse(emptyEnv)
diff --git a/webknossos-datastore/conf/META-INF/services/java.nio.file.spi.FileSystemProvider b/webknossos-datastore/conf/META-INF/services/java.nio.file.spi.FileSystemProvider
index 3934e340ed9..6739b09b3c5 100644
--- a/webknossos-datastore/conf/META-INF/services/java.nio.file.spi.FileSystemProvider
+++ b/webknossos-datastore/conf/META-INF/services/java.nio.file.spi.FileSystemProvider
@@ -1,3 +1,3 @@
-com.upplication.s3fs.S3FileSystemProvider
+com.scalableminds.webknossos.datastore.s3fs.S3FileSystemProvider
com.scalableminds.webknossos.datastore.storage.httpsfilesystem.HttpsFileSystemProvider
com.scalableminds.webknossos.datastore.storage.httpsfilesystem.HttpFileSystemProvider