refactoring

Chris Lu 2018-12-27 23:29:51 -08:00
parent 68eacc2226
commit 319ab6d98f
5 changed files with 29 additions and 13 deletions

seaweed/hdfs/ByteBufferOutputStream.java (new file)

@@ -0,0 +1,21 @@
package seaweed.hdfs;

import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;

public class ByteBufferOutputStream extends OutputStream {
    private final ByteBuffer buf;

    public ByteBufferOutputStream(ByteBuffer buf) {
        this.buf = buf;
    }

    public void write(int b) throws IOException {
        this.buf.put((byte) b);
    }

    public void write(byte[] b, int off, int len) throws IOException {
        this.buf.put(b, off, len);
    }
}
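The new adapter exists so that stream-oriented writers can fill a caller-owned ByteBuffer in place; SeaweedRead below uses it to copy an HTTP response body straight into a read buffer. A minimal usage sketch (the example class is hypothetical, not part of this commit):

import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;

import seaweed.hdfs.ByteBufferOutputStream;

// Hypothetical usage sketch: fill a pre-allocated byte[] through the
// OutputStream API. Each write advances the wrapped buffer's position.
public class ByteBufferOutputStreamExample {
    public static void main(String[] args) throws IOException {
        byte[] buffer = new byte[16];
        OutputStream out = new ByteBufferOutputStream(ByteBuffer.wrap(buffer, 0, buffer.length));
        out.write(new byte[]{1, 2, 3}, 0, 3); // bytes land in buffer[0..2]
    }
}

One caveat: once the wrapped buffer's limit is reached, ByteBuffer.put throws an unchecked BufferOverflowException rather than an IOException, so callers should wrap exactly the number of bytes they expect to write.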

seaweed/hdfs/SeaweedOutputStream.java

@@ -24,6 +24,8 @@ import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
+ import static seaweed.hdfs.SeaweedFileSystemStore.getParentDirectory;
public class SeaweedOutputStream extends OutputStream implements Syncable, StreamCapabilities {
private static final Logger LOG = LoggerFactory.getLogger(SeaweedOutputStream.class);
@@ -79,7 +81,7 @@ public class SeaweedOutputStream extends OutputStream implements Syncable, StreamCapabilities {
LOG.debug("SeaweedWrite.writeMeta path: {} entry:{}", path, entry);
try {
- SeaweedWrite.writeMeta(filerGrpcClient, path, entry);
+ SeaweedWrite.writeMeta(filerGrpcClient, getParentDirectory(path), entry);
} catch (Exception ex) {
throw new IOException(ex);
}

seaweed/hdfs/SeaweedRead.java

@@ -1,14 +1,11 @@
package seaweed.hdfs;
- import org.apache.hadoop.hdfs.util.ByteBufferOutputStream;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHeaders;
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.HttpClientBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import seaweedfs.client.FilerGrpcClient;
import seaweedfs.client.FilerProto;
@@ -23,7 +20,7 @@ import java.util.Map;
public class SeaweedRead {
- private static final Logger LOG = LoggerFactory.getLogger(SeaweedRead.class);
+ // private static final Logger LOG = LoggerFactory.getLogger(SeaweedRead.class);
// returns bytesRead
public static long read(FilerGrpcClient filerGrpcClient, List<VisibleInterval> visibleIntervals,
@@ -81,7 +78,7 @@
int len = (int) (chunkView.logicOffset - position + chunkView.size);
OutputStream outputStream = new ByteBufferOutputStream(ByteBuffer.wrap(buffer, startOffset, len));
entity.writeTo(outputStream);
- LOG.debug("* read chunkView:{} startOffset:{} length:{}", chunkView, startOffset, len);
+ // LOG.debug("* read chunkView:{} startOffset:{} length:{}", chunkView, startOffset, len);
return len;
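The import change above swaps Hadoop's internal org.apache.hadoop.hdfs.util.ByteBufferOutputStream for the new local class, removing a dependency on an HDFS-internal utility while keeping the same copy-into-caller-buffer read pattern. A sketch of that pattern (the helper class and method name are assumed for illustration, not from the commit):

import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;

import org.apache.http.HttpEntity;

import seaweed.hdfs.ByteBufferOutputStream;

// Hypothetical helper mirroring the read path above: stream an HTTP
// response body directly into a slice of the caller's buffer.
public class ChunkCopy {
    static int copyTo(HttpEntity entity, byte[] buffer, int startOffset, int len)
            throws IOException {
        OutputStream out = new ByteBufferOutputStream(ByteBuffer.wrap(buffer, startOffset, len));
        entity.writeTo(out); // at most len bytes fit before the buffer's limit
        return len;
    }
}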

seaweed/hdfs/SeaweedWrite.java

@@ -1,6 +1,5 @@
package seaweed.hdfs;
- import org.apache.hadoop.fs.Path;
import org.apache.http.HttpResponse;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.mime.HttpMultipartMode;
@@ -14,8 +13,6 @@ import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
- import static seaweed.hdfs.SeaweedFileSystemStore.getParentDirectory;
public class SeaweedWrite {
public static void writeData(FilerProto.Entry.Builder entry,
@@ -49,10 +46,10 @@
}
public static void writeMeta(final FilerGrpcClient filerGrpcClient,
- final Path path, final FilerProto.Entry.Builder entry) {
+ final String parentDirectory, final FilerProto.Entry.Builder entry) {
filerGrpcClient.getBlockingStub().createEntry(
FilerProto.CreateEntryRequest.newBuilder()
- .setDirectory(getParentDirectory(path))
+ .setDirectory(parentDirectory)
.setEntry(entry)
.build()
);
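The effect of this change is to move directory resolution out of SeaweedWrite: writeMeta now takes the parent directory as a plain String, which lets this class drop both the org.apache.hadoop.fs.Path import and the static getParentDirectory import, while the call site in SeaweedOutputStream passes getParentDirectory(path) itself. A sketch of a caller under the new signature (the wrapper class and method are illustrative only):

import org.apache.hadoop.fs.Path;

import seaweed.hdfs.SeaweedWrite;
import seaweedfs.client.FilerGrpcClient;
import seaweedfs.client.FilerProto;

import static seaweed.hdfs.SeaweedFileSystemStore.getParentDirectory;

// Hypothetical caller: with the new signature the caller, not SeaweedWrite,
// decides which filer directory the entry metadata is created under.
public class WriteMetaExample {
    static void persist(FilerGrpcClient filerGrpcClient, Path path, FilerProto.Entry.Builder entry) {
        SeaweedWrite.writeMeta(filerGrpcClient, getParentDirectory(path), entry);
    }
}

Taking a String instead of a Path also leaves SeaweedWrite free of Hadoop types, making it callable from code that has no Path at hand.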

seaweed/hdfs/SeaweedReadTest.java

@@ -1,7 +1,6 @@
- package seaweedfs.hdfs;
+ package seaweed.hdfs;
import org.junit.Test;
- import seaweed.hdfs.SeaweedRead;
import seaweedfs.client.FilerProto;
import java.util.ArrayList;