avoid range request for gzipped content

This commit is contained in:
Chris Lu 2018-12-07 01:23:30 -08:00
parent 6b39effe7f
commit 6946c51430
2 changed files with 10 additions and 4 deletions

View File

@@ -72,7 +72,7 @@
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpmime</artifactId>
<version>4.5.2</version>
<version>4.5.6</version>
</dependency>
<dependency>
<groupId>junit</groupId>

View File

@@ -1,6 +1,8 @@
package seaweed.hdfs;
import org.apache.hadoop.hdfs.util.ByteBufferOutputStream;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHeaders;
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
@@ -11,6 +13,8 @@ import seaweedfs.client.FilerGrpcClient;
import seaweedfs.client.FilerProto;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
@@ -52,7 +56,8 @@ public class SeaweedRead {
HttpClient client = HttpClientBuilder.create().build();
HttpGet request = new HttpGet(
String.format("http://%s/%s", locations.getLocations(0).getUrl(), chunkView.fileId));
request.setHeader("Range",
request.setHeader(HttpHeaders.ACCEPT_ENCODING, "");
request.setHeader(HttpHeaders.RANGE,
String.format("bytes=%d-%d", chunkView.offset, chunkView.offset + chunkView.size));
try {
@@ -60,9 +65,10 @@
HttpEntity entity = response.getEntity();
int len = (int) (chunkView.logicOffset - position + chunkView.size);
int chunReadCount = entity.getContent().read(buffer, startOffset, len);
OutputStream outputStream = new ByteBufferOutputStream(ByteBuffer.wrap(buffer, startOffset, len));
entity.writeTo(outputStream);
LOG.debug("* read chunkView:{} startOffset:{} length:{}", chunkView, startOffset, len);
LOG.debug("* read chunkView:{} startOffset:{} length:{} chunReadCount:{}", chunkView, startOffset, len, chunReadCount);
readCount += len;
startOffset += len;