mirror of https://github.com/seaweedfs/seaweedfs.git
refactoring

This commit is contained in: commit adf12c8825 (parent 1bfb96f34d)
@@ -53,36 +53,44 @@ public class SeaweedRead {
                 return 0;
             }
 
-            HttpClient client = HttpClientBuilder.create().build();
-            HttpGet request = new HttpGet(
-                    String.format("http://%s/%s", locations.getLocations(0).getUrl(), chunkView.fileId));
-
-            if (!chunkView.isFullChunk){
-                request.setHeader(HttpHeaders.ACCEPT_ENCODING, "");
-                request.setHeader(HttpHeaders.RANGE,
-                        String.format("bytes=%d-%d", chunkView.offset, chunkView.offset + chunkView.size));
-            }
-
-            try {
-                HttpResponse response = client.execute(request);
-                HttpEntity entity = response.getEntity();
-
-                int len = (int) (chunkView.logicOffset - position + chunkView.size);
-                OutputStream outputStream = new ByteBufferOutputStream(ByteBuffer.wrap(buffer, startOffset, len));
-                entity.writeTo(outputStream);
-                LOG.debug("* read chunkView:{} startOffset:{} length:{}", chunkView, startOffset, len);
-
-                readCount += len;
-                startOffset += len;
-
-            } catch (IOException e) {
-                e.printStackTrace();
-            }
+            int len = readChunkView(position, buffer, startOffset, chunkView, locations);
+
+            readCount += len;
+            startOffset += len;
         }
 
         return readCount;
     }
 
+    private static int readChunkView(long position, byte[] buffer, int startOffset, ChunkView chunkView, FilerProto.Locations locations) {
+        HttpClient client = HttpClientBuilder.create().build();
+        HttpGet request = new HttpGet(
+                String.format("http://%s/%s", locations.getLocations(0).getUrl(), chunkView.fileId));
+
+        if (!chunkView.isFullChunk) {
+            request.setHeader(HttpHeaders.ACCEPT_ENCODING, "");
+            request.setHeader(HttpHeaders.RANGE,
+                    String.format("bytes=%d-%d", chunkView.offset, chunkView.offset + chunkView.size));
+        }
+
+        try {
+            HttpResponse response = client.execute(request);
+            HttpEntity entity = response.getEntity();
+
+            int len = (int) (chunkView.logicOffset - position + chunkView.size);
+            OutputStream outputStream = new ByteBufferOutputStream(ByteBuffer.wrap(buffer, startOffset, len));
+            entity.writeTo(outputStream);
+            LOG.debug("* read chunkView:{} startOffset:{} length:{}", chunkView, startOffset, len);
+
+            return len;
+
+        } catch (IOException e) {
+            e.printStackTrace();
+        }
+        return 0;
+    }
+
     public static List<ChunkView> viewFromVisibles(List<VisibleInterval> visibleIntervals, long offset, long size) {
         List<ChunkView> views = new ArrayList<>();
 
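For orientation: the refactoring moves the per-chunk HTTP fetch out of the read loop into a private readChunkView helper that returns the number of bytes read (0 on failure), so the loop only has to advance readCount and startOffset. The sketch below is a standalone illustration of that fetch pattern, not the project's code: the names ChunkFetchSketch, fetchChunk, and volumeUrl, and the manual InputStream copy loop, are stand-ins, whereas the real helper derives the URL from FilerProto.Locations and writes the entity through a ByteBufferOutputStream.

// Standalone sketch (not the project's code): fetch one chunk over HTTP,
// optionally as a byte range with gzip disabled, into a region of the
// caller's buffer. Names like fetchChunk and volumeUrl are illustrative only.
import org.apache.http.HttpEntity;
import org.apache.http.HttpHeaders;
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.HttpClientBuilder;

import java.io.IOException;
import java.io.InputStream;

public class ChunkFetchSketch {

    // Returns the number of bytes copied into buffer starting at startOffset,
    // or 0 on failure, mirroring the helper's "return 0 on error" behavior.
    static int fetchChunk(String volumeUrl, String fileId,
                          long rangeOffset, int size, boolean isFullChunk,
                          byte[] buffer, int startOffset) {
        HttpClient client = HttpClientBuilder.create().build();
        HttpGet request = new HttpGet(String.format("http://%s/%s", volumeUrl, fileId));

        if (!isFullChunk) {
            // Ask for identity encoding so the Range applies to stored bytes, not a gzip stream.
            request.setHeader(HttpHeaders.ACCEPT_ENCODING, "");
            request.setHeader(HttpHeaders.RANGE,
                    String.format("bytes=%d-%d", rangeOffset, rangeOffset + size));
        }

        try {
            HttpResponse response = client.execute(request);
            HttpEntity entity = response.getEntity();
            int copied = 0;
            // Copy the response body into buffer[startOffset .. startOffset + size).
            try (InputStream in = entity.getContent()) {
                int n;
                while (copied < size && (n = in.read(buffer, startOffset + copied, size - copied)) != -1) {
                    copied += n;
                }
            }
            return copied;
        } catch (IOException e) {
            e.printStackTrace();
            return 0;
        }
    }
}

The real helper instead writes the entity straight into the wrapped buffer with entity.writeTo(new ByteBufferOutputStream(...)), which avoids a manual copy loop; the loop above only keeps the sketch independent of that project-specific stream class.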
@@ -1,18 +1,9 @@
 package seaweedfs.hdfs;
 
-import org.apache.http.Header;
-import org.apache.http.HttpEntity;
-import org.apache.http.HttpHeaders;
-import org.apache.http.client.methods.CloseableHttpResponse;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.impl.client.HttpClientBuilder;
-import org.apache.http.util.EntityUtils;
 import org.junit.Test;
 import seaweed.hdfs.SeaweedRead;
 import seaweedfs.client.FilerProto;
 
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -72,36 +63,4 @@ public class SeaweedReadTest {
 
     }
 
-    // test gzipped content with range requests. Conclusion: not doing this.
-    public void testGzippedRangeRequest() throws IOException {
-
-        SeaweedRead.ChunkView chunkView = new SeaweedRead.ChunkView("2,621a042be6e39d", 0, 28, 0);
-        CloseableHttpClient client = HttpClientBuilder.create().build();
-        String targetUrl = String.format("http://%s/%s", "localhost:8080", chunkView.fileId);
-        HttpGet request = new HttpGet(targetUrl);
-        // request.removeHeaders(HttpHeaders.ACCEPT_ENCODING);
-        request.setHeader(HttpHeaders.ACCEPT_ENCODING, "");
-        request.setHeader(HttpHeaders.RANGE, String.format("bytes=%d-%d", chunkView.offset, chunkView.offset + chunkView.size));
-        System.out.println("request:");
-        for (Header header : request.getAllHeaders()) {
-            System.out.println(header.getName() + ": " + header.getValue());
-        }
-
-        int len = 29;
-        byte[] buffer = new byte[len];
-        CloseableHttpResponse response = null;
-        try {
-            response = client.execute(request);
-            HttpEntity entity = response.getEntity();
-            System.out.println("content length:" + entity.getContentLength());
-            System.out.println("is streaming:" + entity.isStreaming());
-            System.out.println(EntityUtils.toString(entity));
-        } finally {
-
-            if (response != null) {
-                response.close();
-            }
-        }
-
-    }
 }
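The comment on the removed test ("test gzipped content with range requests. Conclusion: not doing this.") records why ranged reads over gzip-encoded responses were abandoned: a byte range against a gzip-encoded representation addresses compressed bytes rather than the plain chunk offsets the client needs, which is consistent with readChunkView clearing Accept-Encoding before setting Range. The probe below is a compact variant of what the removed test printed, useful for checking by hand how a volume server answers a ranged read. Like the test, it assumes a server on localhost:8080 and the file id 2,621a042be6e39d; both are placeholders to adjust.

// Small manual probe: does a ranged read of the first 28 bytes come back
// gzip-encoded or plain? Assumes a local volume server and a stored file id,
// both taken from the removed test and only placeholders here.
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHeaders;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.util.EntityUtils;

public class RangeEncodingProbe {
    public static void main(String[] args) throws Exception {
        String url = "http://localhost:8080/2,621a042be6e39d"; // placeholder volume server and file id
        try (CloseableHttpClient client = HttpClientBuilder.create().build()) {
            HttpGet request = new HttpGet(url);
            request.setHeader(HttpHeaders.ACCEPT_ENCODING, ""); // request identity encoding
            request.setHeader(HttpHeaders.RANGE, "bytes=0-27"); // first 28 bytes, inclusive range

            try (CloseableHttpResponse response = client.execute(request)) {
                Header encoding = response.getFirstHeader(HttpHeaders.CONTENT_ENCODING);
                HttpEntity entity = response.getEntity();
                System.out.println("status: " + response.getStatusLine());
                System.out.println("content-encoding: " + (encoding == null ? "(none)" : encoding.getValue()));
                System.out.println("content-length: " + entity.getContentLength());
                System.out.println(EntityUtils.toString(entity));
            }
        }
    }
}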